diff --git a/README.md b/README.md new file mode 100644 index 00000000..44c1acfc --- /dev/null +++ b/README.md @@ -0,0 +1,9 @@ +# PandaDB + +Intelligent Graph Database. + + + +## Licensing + +PandaDB-2019 is an open source product licensed under GPLv3. diff --git a/aipm-library/pom.xml b/aipm-library/pom.xml new file mode 100644 index 00000000..7e26960f --- /dev/null +++ b/aipm-library/pom.xml @@ -0,0 +1,42 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + aipm-library + + + cn.pandadb + commons + ${pandadb.version} + compile + + + cn.pandadb + blob-commons + ${pandadb.version} + compile + + + org.scala-lang.modules + scala-parser-combinators_2.11 + + + org.apache.httpcomponents + httpmime + + + info.debatty + java-string-similarity + + + + \ No newline at end of file diff --git a/src/aipm/scala/cn/aipm/CommonBlobPropertyExtractor.scala b/aipm-library/src/main/scala/cn/aipm/CommonBlobPropertyExtractor.scala similarity index 84% rename from src/aipm/scala/cn/aipm/CommonBlobPropertyExtractor.scala rename to aipm-library/src/main/scala/cn/aipm/CommonBlobPropertyExtractor.scala index 8784455a..6fe1adcc 100644 --- a/src/aipm/scala/cn/aipm/CommonBlobPropertyExtractor.scala +++ b/aipm-library/src/main/scala/cn/aipm/CommonBlobPropertyExtractor.scala @@ -1,8 +1,8 @@ package cn.aipm -import cn.graiph.PropertyExtractor -import cn.graiph.blob.Blob -import cn.graiph.util.Configuration +import cn.pandadb.cypherplus.PropertyExtractor +import cn.pandadb.blob.Blob +import cn.pandadb.util.{ContextMap, Configuration} /** * Created by bluejoe on 2019/2/17. diff --git a/src/aipm/scala/cn/aipm/audio/AudioRecongnizer.scala b/aipm-library/src/main/scala/cn/aipm/audio/AudioRecongnizer.scala similarity index 83% rename from src/aipm/scala/cn/aipm/audio/AudioRecongnizer.scala rename to aipm-library/src/main/scala/cn/aipm/audio/AudioRecongnizer.scala index 7015547b..ad199c28 100644 --- a/src/aipm/scala/cn/aipm/audio/AudioRecongnizer.scala +++ b/aipm-library/src/main/scala/cn/aipm/audio/AudioRecongnizer.scala @@ -1,9 +1,8 @@ package cn.aipm.audio -import cn.graiph.PropertyExtractor +import cn.pandadb.cypherplus.PropertyExtractor import cn.aipm.service.ServiceInitializer -import cn.graiph.blob.Blob - +import cn.pandadb.blob.Blob class AudioRecongnizer extends PropertyExtractor with ServiceInitializer { diff --git a/src/aipm/scala/cn/aipm/image/DogOrCatClassifier.scala b/aipm-library/src/main/scala/cn/aipm/image/DogOrCatClassifier.scala similarity index 83% rename from src/aipm/scala/cn/aipm/image/DogOrCatClassifier.scala rename to aipm-library/src/main/scala/cn/aipm/image/DogOrCatClassifier.scala index c4430439..8e1bf893 100644 --- a/src/aipm/scala/cn/aipm/image/DogOrCatClassifier.scala +++ b/aipm-library/src/main/scala/cn/aipm/image/DogOrCatClassifier.scala @@ -1,8 +1,8 @@ package cn.aipm.image import cn.aipm.service.ServiceInitializer -import cn.graiph.PropertyExtractor -import cn.graiph.blob.Blob +import cn.pandadb.cypherplus.PropertyExtractor +import cn.pandadb.blob.Blob class DogOrCatClassifier extends PropertyExtractor with ServiceInitializer { diff --git a/src/aipm/scala/cn/aipm/image/FaceSimilarityComparator.scala b/aipm-library/src/main/scala/cn/aipm/image/FaceSimilarityComparator.scala similarity index 90% rename from src/aipm/scala/cn/aipm/image/FaceSimilarityComparator.scala rename to aipm-library/src/main/scala/cn/aipm/image/FaceSimilarityComparator.scala index 2ee74b73..d0a3144b 100644 --- a/src/aipm/scala/cn/aipm/image/FaceSimilarityComparator.scala +++ 
b/aipm-library/src/main/scala/cn/aipm/image/FaceSimilarityComparator.scala @@ -1,9 +1,8 @@ package cn.aipm.image -import cn.graiph.SetComparator +import cn.pandadb.cypherplus.SetComparator import cn.aipm.service.ServiceInitializer -import cn.graiph.blob.Blob - +import cn.pandadb.blob.Blob class FaceSimilarityComparator extends SetComparator with ServiceInitializer { diff --git a/src/aipm/scala/cn/aipm/image/ImageMetaDataExtractor.scala b/aipm-library/src/main/scala/cn/aipm/image/ImageMetaDataExtractor.scala similarity index 80% rename from src/aipm/scala/cn/aipm/image/ImageMetaDataExtractor.scala rename to aipm-library/src/main/scala/cn/aipm/image/ImageMetaDataExtractor.scala index 5874cd04..f73e22e7 100644 --- a/src/aipm/scala/cn/aipm/image/ImageMetaDataExtractor.scala +++ b/aipm-library/src/main/scala/cn/aipm/image/ImageMetaDataExtractor.scala @@ -2,9 +2,9 @@ package cn.aipm.image import javax.imageio.ImageIO -import cn.graiph.PropertyExtractor -import cn.graiph.blob.Blob -import cn.graiph.util.Configuration +import cn.pandadb.cypherplus.PropertyExtractor +import cn.pandadb.blob.Blob +import cn.pandadb.util.{ContextMap, Configuration} /** * Created by bluejoe on 2019/2/17. diff --git a/src/aipm/scala/cn/aipm/image/PlateNumberExtractor.scala b/aipm-library/src/main/scala/cn/aipm/image/PlateNumberExtractor.scala similarity index 84% rename from src/aipm/scala/cn/aipm/image/PlateNumberExtractor.scala rename to aipm-library/src/main/scala/cn/aipm/image/PlateNumberExtractor.scala index 30f0fc92..f9b0cd0c 100644 --- a/src/aipm/scala/cn/aipm/image/PlateNumberExtractor.scala +++ b/aipm-library/src/main/scala/cn/aipm/image/PlateNumberExtractor.scala @@ -1,9 +1,8 @@ package cn.aipm.image -import cn.graiph.PropertyExtractor +import cn.pandadb.cypherplus.PropertyExtractor import cn.aipm.service.ServiceInitializer -import cn.graiph.blob.Blob - +import cn.pandadb.blob.Blob class PlateNumberExtractor extends PropertyExtractor with ServiceInitializer { diff --git a/src/aipm/scala/cn/aipm/service/Services.scala b/aipm-library/src/main/scala/cn/aipm/service/Services.scala similarity index 97% rename from src/aipm/scala/cn/aipm/service/Services.scala rename to aipm-library/src/main/scala/cn/aipm/service/Services.scala index 636e0fc6..a71b25d3 100644 --- a/src/aipm/scala/cn/aipm/service/Services.scala +++ b/aipm-library/src/main/scala/cn/aipm/service/Services.scala @@ -2,8 +2,8 @@ package cn.aipm.service import java.io.InputStream -import cn.graiph.AnyComparator -import cn.graiph.util.{ConfigUtils, Configuration} +import cn.pandadb.cypherplus.AnyComparator +import cn.pandadb.util.{ContextMap, ConfigUtils, Configuration} import scala.collection.immutable.Map import scala.util.parsing.json.JSON import ConfigUtils._ @@ -47,7 +47,6 @@ class Services(private val _aipmHttpHostUrl: String) { } - def extractPlateNumber(img1InputStream: InputStream): String = { val serviceUrl = getServiceUrl("PlateNumber") @@ -112,7 +111,6 @@ class Services(private val _aipmHttpHostUrl: String) { } } - def segmentText(text: String): List[String] = { val serviceUrl = getServiceUrl("TextSegment") val contents = Map("text" -> text) diff --git a/src/aipm/scala/cn/aipm/service/WebUtils.scala b/aipm-library/src/main/scala/cn/aipm/service/WebUtils.scala similarity index 99% rename from src/aipm/scala/cn/aipm/service/WebUtils.scala rename to aipm-library/src/main/scala/cn/aipm/service/WebUtils.scala index 57075682..74a4079f 100644 --- a/src/aipm/scala/cn/aipm/service/WebUtils.scala +++ 
b/aipm-library/src/main/scala/cn/aipm/service/WebUtils.scala @@ -38,7 +38,6 @@ object WebUtils { resStr } - def doPost(reqUrl: String, strContents: Map[String, String] = Map(), fileContents: Map[String, File] = Map(), inStreamContents: Map[String, InputStream] = Map()): String = { var resStr = "" diff --git a/src/aipm/scala/cn/aipm/text/ChineseTokenizer.scala b/aipm-library/src/main/scala/cn/aipm/text/ChineseTokenizer.scala similarity index 89% rename from src/aipm/scala/cn/aipm/text/ChineseTokenizer.scala rename to aipm-library/src/main/scala/cn/aipm/text/ChineseTokenizer.scala index a50421d5..8dea4e7c 100644 --- a/src/aipm/scala/cn/aipm/text/ChineseTokenizer.scala +++ b/aipm-library/src/main/scala/cn/aipm/text/ChineseTokenizer.scala @@ -1,7 +1,7 @@ package cn.aipm.text import cn.aipm.service.ServiceInitializer -import cn.graiph.PropertyExtractor +import cn.pandadb.cypherplus.PropertyExtractor class ChineseTokenizer extends PropertyExtractor with ServiceInitializer { diff --git a/src/aipm/scala/cn/aipm/text/JaroStringSimilarity.scala b/aipm-library/src/main/scala/cn/aipm/text/JaroStringSimilarity.scala similarity index 91% rename from src/aipm/scala/cn/aipm/text/JaroStringSimilarity.scala rename to aipm-library/src/main/scala/cn/aipm/text/JaroStringSimilarity.scala index 55e33a36..afe8f8df 100644 --- a/src/aipm/scala/cn/aipm/text/JaroStringSimilarity.scala +++ b/aipm-library/src/main/scala/cn/aipm/text/JaroStringSimilarity.scala @@ -1,7 +1,7 @@ package cn.aipm.text -import cn.graiph.ValueComparator -import cn.graiph.util.Configuration +import cn.pandadb.cypherplus.ValueComparator +import cn.pandadb.util.{ContextMap, Configuration} import info.debatty.java.stringsimilarity.{Cosine, Jaccard, JaroWinkler} /** diff --git a/src/aipm/scala/cn/aipm/text/SentimentClassifier.scala b/aipm-library/src/main/scala/cn/aipm/text/SentimentClassifier.scala similarity index 89% rename from src/aipm/scala/cn/aipm/text/SentimentClassifier.scala rename to aipm-library/src/main/scala/cn/aipm/text/SentimentClassifier.scala index a6fc6be6..1aba5ac2 100644 --- a/src/aipm/scala/cn/aipm/text/SentimentClassifier.scala +++ b/aipm-library/src/main/scala/cn/aipm/text/SentimentClassifier.scala @@ -1,7 +1,7 @@ package cn.aipm.text import cn.aipm.service.ServiceInitializer -import cn.graiph.PropertyExtractor +import cn.pandadb.cypherplus.PropertyExtractor class SentimentClassifier extends PropertyExtractor with ServiceInitializer { diff --git a/src/graiph-database/scala/cn/graiph/util/ExternalProcess.scala b/aipm-library/src/main/scala/cn/aipm/util/ExternalProcess.scala similarity index 92% rename from src/graiph-database/scala/cn/graiph/util/ExternalProcess.scala rename to aipm-library/src/main/scala/cn/aipm/util/ExternalProcess.scala index f2029c6b..1db64203 100644 --- a/src/graiph-database/scala/cn/graiph/util/ExternalProcess.scala +++ b/aipm-library/src/main/scala/cn/aipm/util/ExternalProcess.scala @@ -1,8 +1,10 @@ -package cn.graiph.util +package cn.aipm.util import java.io.ByteArrayOutputStream -import cn.graiph.blob.Blob +import cn.pandadb.blob.Blob +import cn.pandadb.util.{StreamUtils, Logging} import StreamUtils._ +import cn.pandadb.util.Logging import org.apache.commons.io.IOUtils import scala.util.parsing.json.JSON diff --git a/blob-commons/pom.xml b/blob-commons/pom.xml new file mode 100644 index 00000000..94a06f24 --- /dev/null +++ b/blob-commons/pom.xml @@ -0,0 +1,41 @@ + + + + cn.pandadb + parent + 0.0.2 + ../ + + + 4.0.0 + + cn.pandadb + blob-commons + + + cn.pandadb + commons + ${pandadb.version} + 
compile + + + eu.medsea.mimeutil + mime-util + + + commons-io + commons-io + + + commons-codec + commons-codec + + + org.apache.httpcomponents + httpclient + + + + \ No newline at end of file diff --git a/src/blob/scala/cn/graiph/blob/BlobIO.scala b/blob-commons/src/main/scala/cn/pandadb/blob/BlobIO.scala similarity index 96% rename from src/blob/scala/cn/graiph/blob/BlobIO.scala rename to blob-commons/src/main/scala/cn/pandadb/blob/BlobIO.scala index 61ccdaeb..eb34d462 100644 --- a/src/blob/scala/cn/graiph/blob/BlobIO.scala +++ b/blob-commons/src/main/scala/cn/pandadb/blob/BlobIO.scala @@ -17,12 +17,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -package cn.graiph.blob +package cn.pandadb.blob import java.io.ByteArrayOutputStream - -import cn.graiph.util.StreamUtils -import cn.graiph.util.StreamUtils._ +import cn.pandadb.util.StreamUtils +import cn.pandadb.util.StreamUtils._ /** * Created by bluejoe on 2019/4/18. diff --git a/src/graiph-database/scala/cn/graiph/CypherPluginRegistry.scala b/blob-commons/src/main/scala/cn/pandadb/blob/CypherPluginRegistry.scala similarity index 81% rename from src/graiph-database/scala/cn/graiph/CypherPluginRegistry.scala rename to blob-commons/src/main/scala/cn/pandadb/blob/CypherPluginRegistry.scala index b0dc85a8..8fdbda47 100644 --- a/src/graiph-database/scala/cn/graiph/CypherPluginRegistry.scala +++ b/blob-commons/src/main/scala/cn/pandadb/blob/CypherPluginRegistry.scala @@ -1,48 +1,11 @@ -package cn.graiph +package cn.pandadb.blob -import cn.graiph.blob.Blob -import cn.graiph.util.{Configuration, Logging} +import cn.pandadb.cypherplus._ +import cn.pandadb.util.{ContextMap, PandaException, Configuration, Logging} import scala.beans.BeanProperty import scala.collection.mutable -trait AnyComparator { - def initialize(conf: Configuration); -} - -trait ValueComparator extends AnyComparator { - def compare(a: Any, b: Any): Double; -} - -trait SetComparator extends AnyComparator { - def compareAsSets(a: Any, b: Any): Array[Array[Double]]; -} - -/** - * Created by bluejoe on 2019/1/31. 
- */ -trait CustomPropertyProvider { - def getCustomProperty(x: Any, propertyName: String): Option[Any]; -} - -trait ValueMatcher { - def like(a: Any, b: Any, algoName: Option[String], threshold: Option[Double]): Option[Boolean]; - - def containsOne(a: Any, b: Any, algoName: Option[String], threshold: Option[Double]): Option[Boolean]; - - def containsSet(a: Any, b: Any, algoName: Option[String], threshold: Option[Double]): Option[Boolean]; - - /** - * compares two values - */ - def compareOne(a: Any, b: Any, algoName: Option[String]): Option[Double]; - - /** - * compares two objects as sets - */ - def compareSet(a: Any, b: Any, algoName: Option[String]): Option[Array[Array[Double]]]; -} - object ValueType { def typeNameOf(x: Any): String = x match { case b: Blob => s"blob/${b.mimeType.major}".toLowerCase() @@ -74,7 +37,7 @@ class CypherPluginRegistry { @BeanProperty var extractors: Array[DomainExtractorEntry] = Array(); @BeanProperty var comparators: Array[DomainComparatorEntry] = Array(); - def createCustomPropertyProvider(conf: Configuration) = new CustomPropertyProvider { + def createCustomPropertyProvider(conf: Configuration): CustomPropertyProvider = new CustomPropertyProvider { extractors.foreach(_.extractor.initialize(conf)); //propertyName, typeName @@ -113,7 +76,7 @@ class CypherPluginRegistry { } } - def createValueComparatorRegistry(conf: Configuration) = new ValueMatcher with Logging { + def createValueComparatorRegistry(conf: Configuration): ValueMatcher = new ValueMatcher with Logging { type CompareAnyMethod = (Any, Any) => Any; type CompareValueMethod = (Any, Any) => Double; type CompareSetMethod = (Any, Any) => Array[Array[Double]]; @@ -181,14 +144,16 @@ class CypherPluginRegistry { } } - private def getMatchedComparator(compareValueOrSet: Boolean, typeA: String, typeB: String, algoName: Option[String]): Option[(CompareAnyMethod, DomainComparatorEntry)] = { + private def getMatchedComparator(compareValueOrSet: Boolean, typeA: String, typeB: String, + algoName: Option[String]): Option[(CompareAnyMethod, DomainComparatorEntry)] = { def isEntryMatched(entry: DomainComparatorEntry, typeName: String): Boolean = { val f = entry.domain.equalsIgnoreCase(typeName) && - (if (compareValueOrSet) + (if (compareValueOrSet) { entry.comparator.isInstanceOf[ValueComparator] - else + } + else { entry.comparator.isInstanceOf[SetComparator] - ) + }) algoName.map { name => f && entry.name.equalsIgnoreCase(name) @@ -198,10 +163,12 @@ class CypherPluginRegistry { } def doCompare(comparator: AnyComparator, a: Any, b: Any): Any = { - if (compareValueOrSet) - comparator.asInstanceOf[ValueComparator].compare(a, b); - else - comparator.asInstanceOf[SetComparator].compareAsSets(a, b); + if (compareValueOrSet) { + comparator.asInstanceOf[ValueComparator].compare(a, b) + } + else { + comparator.asInstanceOf[SetComparator].compareAsSets(a, b) + } } comparators.find(isEntryMatched(_, ValueType.concat(typeA, typeB))) @@ -250,16 +217,17 @@ class CypherPluginRegistry { } class UnknownPropertyException(name: String, x: Any) - extends RuntimeException(s"unknown property `$name` for $x") { + extends PandaException(s"unknown property `$name` for $x") { } class NoSuitableComparatorException(a: Any, b: Any, algoName: Option[String]) - extends RuntimeException(s"no suiltable comparator: ${ValueType.typeNameOf(a)} and ${ValueType.typeNameOf(b)}, algorithm name: ${algoName.getOrElse("(none)")}") { + extends PandaException(s"no suiltable comparator: ${ValueType.typeNameOf(a)} and ${ValueType.typeNameOf(b)}," + + s" 
algorithm name: ${algoName.getOrElse("(none)")}") { } class TooManyObjectsException(o: Any) - extends RuntimeException(s"too many objects: $o") { + extends PandaException(s"too many objects: $o") { } \ No newline at end of file diff --git a/blob-commons/src/main/scala/cn/pandadb/blob/InputStreamSource.scala b/blob-commons/src/main/scala/cn/pandadb/blob/InputStreamSource.scala new file mode 100644 index 00000000..a3f33cb1 --- /dev/null +++ b/blob-commons/src/main/scala/cn/pandadb/blob/InputStreamSource.scala @@ -0,0 +1,13 @@ +package cn.pandadb.blob + +import java.io.InputStream + +/** + * Created by bluejoe on 2019/11/10. + */ +trait InputStreamSource { + /** + * note close input stream after consuming + */ + def offerStream[T](consume: (InputStream) => T): T; +} diff --git a/src/blob/scala/cn/graiph/blob/MimeType.scala b/blob-commons/src/main/scala/cn/pandadb/blob/MimeType.scala similarity index 84% rename from src/blob/scala/cn/graiph/blob/MimeType.scala rename to blob-commons/src/main/scala/cn/pandadb/blob/MimeType.scala index a482e8f9..31fb0a17 100644 --- a/src/blob/scala/cn/graiph/blob/MimeType.scala +++ b/blob-commons/src/main/scala/cn/pandadb/blob/MimeType.scala @@ -17,10 +17,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -package cn.graiph.blob +package cn.pandadb.blob import java.util.Properties +import cn.pandadb.util.PandaException import eu.medsea.mimeutil.MimeUtil import org.apache.commons.io.IOUtils @@ -30,9 +31,9 @@ import scala.collection.JavaConversions._ * Created by bluejoe on 2019/4/18. */ case class MimeType(code: Long, text: String) { - def major = text.split("/")(0); + def major: String = text.split("/")(0); - def minor = text.split("/")(1); + def minor: String = text.split("/")(1); } object MimeType { @@ -48,7 +49,7 @@ object MimeType { new MimeType(type2Codes.get(lc).getOrElse(throw new UnknownMimeTypeException(lc)), lc); } - def fromCode(code: Long) = new MimeType(code, code2Types(code)); + def fromCode(code: Long): MimeType = new MimeType(code, code2Types(code)); def guessMimeType(iss: InputStreamSource): MimeType = { val mimeTypes = iss.offerStream { is => @@ -59,6 +60,6 @@ object MimeType { } } -class UnknownMimeTypeException(mtype: String) extends RuntimeException(s"unknown mime-type: $mtype") { +class UnknownMimeTypeException(mtype: String) extends PandaException(s"unknown mime-type: $mtype") { } \ No newline at end of file diff --git a/src/blob/scala/cn/graiph/blob/blob.scala b/blob-commons/src/main/scala/cn/pandadb/blob/blob.scala similarity index 93% rename from src/blob/scala/cn/graiph/blob/blob.scala rename to blob-commons/src/main/scala/cn/pandadb/blob/blob.scala index d992bc34..cab31a7c 100644 --- a/src/blob/scala/cn/graiph/blob/blob.scala +++ b/blob-commons/src/main/scala/cn/pandadb/blob/blob.scala @@ -18,25 +18,18 @@ * along with this program. If not, see . 
*/ -package cn.graiph.blob +package cn.pandadb.blob import java.io._ import java.net.URL -import cn.graiph.util.StreamUtils +import cn.pandadb.util.StreamUtils import org.apache.commons.codec.binary.Hex import org.apache.commons.io.IOUtils import org.apache.http.client.methods.HttpGet import org.apache.http.impl.client.HttpClientBuilder import StreamUtils._ -trait InputStreamSource { - /** - * note close input stream after consuming - */ - def offerStream[T](consume: (InputStream) => T): T; -} - trait BlobEntry { val id: BlobId; val length: Long; @@ -50,7 +43,7 @@ trait Blob extends Comparable[Blob] { def offerStream[T](consume: (InputStream) => T): T = streamSource.offerStream(consume); - def toBytes() = offerStream(IOUtils.toByteArray(_)); + def toBytes(): Array[Byte] = offerStream(IOUtils.toByteArray(_)); def makeTempFile(): File = { offerStream((is) => { @@ -60,9 +53,9 @@ trait Blob extends Comparable[Blob] { }) } - override def compareTo(o: Blob) = this.length.compareTo(o.length); + override def compareTo(o: Blob): Int = this.length.compareTo(o.length); - override def toString = s"blob(length=${length},mime-type=${mimeType.text})"; + override def toString: String = s"blob(length=${length},mime-type=${mimeType.text})"; } //actually a 4-long values diff --git a/src/blob/scala/cn/graiph/blob/messages.scala b/blob-commons/src/main/scala/cn/pandadb/blob/messages.scala similarity index 97% rename from src/blob/scala/cn/graiph/blob/messages.scala rename to blob-commons/src/main/scala/cn/pandadb/blob/messages.scala index 677c1932..84655a81 100644 --- a/src/blob/scala/cn/graiph/blob/messages.scala +++ b/blob-commons/src/main/scala/cn/pandadb/blob/messages.scala @@ -17,7 +17,7 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -package cn.graiph.blob +package cn.pandadb.blob object BlobMessageSignature { val SIGNATURE_GET_BLOB: Byte = 0x55; diff --git a/src/graiph-database/scala/cn/graiph/util/CodecUtils.scala b/blob-commons/src/main/scala/cn/pandadb/blob/util/CodecUtils.scala similarity index 80% rename from src/graiph-database/scala/cn/graiph/util/CodecUtils.scala rename to blob-commons/src/main/scala/cn/pandadb/blob/util/CodecUtils.scala index 77ee76c6..a9ba0568 100644 --- a/src/graiph-database/scala/cn/graiph/util/CodecUtils.scala +++ b/blob-commons/src/main/scala/cn/pandadb/blob/util/CodecUtils.scala @@ -1,10 +1,11 @@ -package cn.graiph.util +package cn.pandadb.blob.util import java.io.InputStream -import cn.graiph.blob.InputStreamSource +import cn.pandadb.blob.InputStreamSource import org.apache.commons.codec.binary.Hex import org.apache.commons.codec.digest.DigestUtils +import cn.pandadb.util.StreamUtils /** * Created by bluejoe on 2018/8/9. 
@@ -33,7 +34,8 @@ object CodecUtils { def md5AsHex(bytes: Array[Byte]): String = md5.digestAsHex(bytes); - def encodeHexString(bytes: Array[Byte]) = Hex.encodeHexString(bytes); + def encodeHexString(bytes: Array[Byte]): String = + Hex.encodeHexString(bytes); def md5AsHex(value: Long): String = md5AsHex(StreamUtils.convertLongArray2ByteArray(Array(value))); diff --git a/blob-feature/pom.xml b/blob-feature/pom.xml new file mode 100644 index 00000000..68c7dc01 --- /dev/null +++ b/blob-feature/pom.xml @@ -0,0 +1,71 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + blob-feature + + + + com.google.code.findbugs + jsr305 + + + cn.pandadb + commons + ${pandadb.version} + compile + + + cn.pandadb + blob-commons + ${pandadb.version} + compile + + + cn.pandadb + external-properties + ${pandadb.version} + + + cn.pandadb + neo4j-hacking + ${pandadb.version} + + + + org.springframework + spring-context + + + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + scala-compile-first + process-resources + + add-source + compile + + + + + + + + + \ No newline at end of file diff --git a/src/blob/java/org/neo4j/bolt/v1/messaging/Neo4jPackV1.java b/blob-feature/src/main/java/org/neo4j/bolt/v1/messaging/Neo4jPackV1.java similarity index 98% rename from src/blob/java/org/neo4j/bolt/v1/messaging/Neo4jPackV1.java rename to blob-feature/src/main/java/org/neo4j/bolt/v1/messaging/Neo4jPackV1.java index 0c67357d..d4493760 100644 --- a/src/blob/java/org/neo4j/bolt/v1/messaging/Neo4jPackV1.java +++ b/blob-feature/src/main/java/org/neo4j/bolt/v1/messaging/Neo4jPackV1.java @@ -28,7 +28,8 @@ import java.util.ArrayList; import java.util.List; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; +import org.neo4j.bolt.blob.BoltServerBlobIO; import org.neo4j.bolt.messaging.BoltIOException; import org.neo4j.bolt.messaging.Neo4jPack; import org.neo4j.bolt.messaging.StructType; @@ -458,6 +459,12 @@ protected UnpackerV1( PackInput input ) @Override public AnyValue unpack() throws IOException { + AnyValue blobValue = BoltServerBlobIO.unpackBlob( this ); + if ( blobValue != null ) + { + return blobValue; + } + PackType valType = peekNextType(); switch ( valType ) { diff --git a/src/blob/java/org/neo4j/bolt/v1/runtime/TransactionStateMachineV1SPI.java b/blob-feature/src/main/java/org/neo4j/bolt/v1/runtime/TransactionStateMachineV1SPI.java similarity index 100% rename from src/blob/java/org/neo4j/bolt/v1/runtime/TransactionStateMachineV1SPI.java rename to blob-feature/src/main/java/org/neo4j/bolt/v1/runtime/TransactionStateMachineV1SPI.java diff --git a/src/blob/java/org/neo4j/bolt/v2/messaging/Neo4jPackV2.java b/blob-feature/src/main/java/org/neo4j/bolt/v2/messaging/Neo4jPackV2.java similarity index 97% rename from src/blob/java/org/neo4j/bolt/v2/messaging/Neo4jPackV2.java rename to blob-feature/src/main/java/org/neo4j/bolt/v2/messaging/Neo4jPackV2.java index d6c282c5..85a6f4a7 100644 --- a/src/blob/java/org/neo4j/bolt/v2/messaging/Neo4jPackV2.java +++ b/blob-feature/src/main/java/org/neo4j/bolt/v2/messaging/Neo4jPackV2.java @@ -30,6 +30,8 @@ import java.time.ZonedDateTime; import java.util.Arrays; +import cn.pandadb.blob.Blob; +import org.neo4j.bolt.blob.BoltServerBlobIO; import org.neo4j.bolt.messaging.BoltIOException; import org.neo4j.bolt.messaging.Neo4jPack; import org.neo4j.bolt.messaging.StructType; @@ -109,7 +111,11 @@ public long version() protected static class PackerV2 extends Neo4jPackV1.PackerV1 { - protected PackerV2( PackOutput output ) + @Override + public void writeBlob( Blob 
blob ) throws IOException + { + BoltServerBlobIO.packBlob( blob, out ); + } protected PackerV2( PackOutput output ) { super( output ); } diff --git a/src/blob/java/org/neo4j/bolt/v5/request/BoltRequestMessageReaderV5.java b/blob-feature/src/main/java/org/neo4j/bolt/v3/messaging/BoltRequestMessageReaderV3.java similarity index 89% rename from src/blob/java/org/neo4j/bolt/v5/request/BoltRequestMessageReaderV5.java rename to blob-feature/src/main/java/org/neo4j/bolt/v3/messaging/BoltRequestMessageReaderV3.java index 23680651..f7f71def 100644 --- a/src/blob/java/org/neo4j/bolt/v5/request/BoltRequestMessageReaderV5.java +++ b/blob-feature/src/main/java/org/neo4j/bolt/v3/messaging/BoltRequestMessageReaderV3.java @@ -17,7 +17,10 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -package org.neo4j.bolt.v5.request; +package org.neo4j.bolt.v3.messaging; + +import java.util.Arrays; +import java.util.List; import org.neo4j.bolt.blob.GetBlobMessageDecoder; import org.neo4j.bolt.messaging.BoltRequestMessageReader; @@ -30,7 +33,6 @@ import org.neo4j.bolt.v1.messaging.decoder.DiscardAllMessageDecoder; import org.neo4j.bolt.v1.messaging.decoder.PullAllMessageDecoder; import org.neo4j.bolt.v1.messaging.decoder.ResetMessageDecoder; -import org.neo4j.bolt.v3.messaging.BoltRequestMessageReaderV3; import org.neo4j.bolt.v3.messaging.decoder.BeginMessageDecoder; import org.neo4j.bolt.v3.messaging.decoder.CommitMessageDecoder; import org.neo4j.bolt.v3.messaging.decoder.GoodbyeMessageDecoder; @@ -40,33 +42,29 @@ import org.neo4j.logging.Log; import org.neo4j.logging.internal.LogService; -import java.util.Arrays; -import java.util.List; - -public class BoltRequestMessageReaderV5 extends BoltRequestMessageReader +public class BoltRequestMessageReaderV3 extends BoltRequestMessageReader { - public BoltRequestMessageReaderV5( BoltConnection connection, BoltResponseMessageWriter responseMessageWriter, - LogService logService ) + public BoltRequestMessageReaderV3( BoltConnection connection, BoltResponseMessageWriter responseMessageWriter, + LogService logService ) { super( connection, newSimpleResponseHandler( responseMessageWriter, connection, logService ), buildDecoders( connection, responseMessageWriter, logService ) ); } private static List buildDecoders( BoltConnection connection, BoltResponseMessageWriter responseMessageWriter, - LogService logService ) + LogService logService ) { BoltResponseHandler resultHandler = new ResultHandler( responseMessageWriter, connection, internalLog( logService ) ); BoltResponseHandler defaultHandler = newSimpleResponseHandler( responseMessageWriter, connection, logService ); return Arrays.asList( + //NOTE: add blob + new GetBlobMessageDecoder( resultHandler ), + //NOTE new HelloMessageDecoder( defaultHandler ), new RunMessageDecoder( defaultHandler ), new DiscardAllMessageDecoder( resultHandler ), new PullAllMessageDecoder( resultHandler ), - - //add blob - new GetBlobMessageDecoder( resultHandler ), - new BeginMessageDecoder( defaultHandler ), new CommitMessageDecoder( resultHandler ), new RollbackMessageDecoder( resultHandler ), @@ -76,7 +74,7 @@ private static List buildDecoders( BoltConnection connect } private static BoltResponseHandler newSimpleResponseHandler( BoltResponseMessageWriter responseMessageWriter, BoltConnection connection, - LogService logService ) + LogService logService ) { return new MessageProcessingHandler( responseMessageWriter, connection, internalLog( logService ) ); } diff --git 
a/blob-feature/src/main/java/org/neo4j/bolt/v3/runtime/TransactionReadyState.java b/blob-feature/src/main/java/org/neo4j/bolt/v3/runtime/TransactionReadyState.java new file mode 100644 index 00000000..58659c0e --- /dev/null +++ b/blob-feature/src/main/java/org/neo4j/bolt/v3/runtime/TransactionReadyState.java @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +package org.neo4j.bolt.v3.runtime; + +import org.neo4j.bolt.blob.GetBlobMessage; +import org.neo4j.bolt.messaging.RequestMessage; +import org.neo4j.bolt.runtime.BoltStateMachineState; +import org.neo4j.bolt.runtime.StateMachineContext; +import org.neo4j.bolt.runtime.StatementMetadata; +import org.neo4j.bolt.runtime.StatementProcessor; +import org.neo4j.bolt.v1.runtime.BoltAuthenticationHelper; +import org.neo4j.bolt.v1.runtime.bookmarking.Bookmark; +import org.neo4j.bolt.v3.messaging.request.CommitMessage; +import org.neo4j.bolt.v3.messaging.request.RollbackMessage; +import org.neo4j.bolt.v3.messaging.request.RunMessage; +import org.neo4j.internal.kernel.api.exceptions.KernelException; +import org.neo4j.values.storable.Values; + +import static org.neo4j.bolt.v3.runtime.ReadyState.FIELDS_KEY; +import static org.neo4j.bolt.v3.runtime.ReadyState.FIRST_RECORD_AVAILABLE_KEY; +import static org.neo4j.util.Preconditions.checkState; +import static org.neo4j.values.storable.Values.stringArray; + +public class TransactionReadyState extends FailSafeBoltStateMachineState +{ + private BoltStateMachineState streamingState; + private BoltStateMachineState readyState; + + @Override + public BoltStateMachineState processUnsafe( RequestMessage message, StateMachineContext context ) throws Exception + { + //NOTE: get blob? 
+ if ( message instanceof GetBlobMessage) + { + ((GetBlobMessage) message).accepts( context ); + return this; + } + //NOTE + + if ( message instanceof RunMessage ) + { + return processRunMessage( (RunMessage) message, context ); + } + if ( message instanceof CommitMessage ) + { + return processCommitMessage( context ); + } + if ( message instanceof RollbackMessage ) + { + return processRollbackMessage( context ); + } + return null; + } + + @Override + public String name() + { + return "TX_READY"; + } + + public void setTransactionStreamingState( BoltStateMachineState streamingState ) + { + this.streamingState = streamingState; + } + + public void setReadyState( BoltStateMachineState readyState ) + { + this.readyState = readyState; + } + + private BoltStateMachineState processRunMessage( RunMessage message, StateMachineContext context ) throws KernelException + { + long start = context.clock().millis(); + StatementProcessor statementProcessor = context.connectionState().getStatementProcessor(); + // NOTE: + StatementMetadata statementMetadata; + //if (BoltAuthenticationHelper.IS_DISPATCHER_NODE){ + if(true){ + statementMetadata = statementProcessor.run( message.statement(), message.params(), null, null, null ); + }else{ + statementMetadata = statementProcessor.run( message.statement(), message.params() ); + } + long end = context.clock().millis(); + + context.connectionState().onMetadata( FIELDS_KEY, stringArray( statementMetadata.fieldNames() ) ); + context.connectionState().onMetadata( FIRST_RECORD_AVAILABLE_KEY, Values.longValue( end - start ) ); + return streamingState; + } + + private BoltStateMachineState processCommitMessage( StateMachineContext context ) throws Exception + { + StatementProcessor statementProcessor = context.connectionState().getStatementProcessor(); + Bookmark bookmark = statementProcessor.commitTransaction(); + bookmark.attachTo( context.connectionState() ); + return readyState; + } + + private BoltStateMachineState processRollbackMessage( StateMachineContext context ) throws Exception + { + StatementProcessor statementProcessor = context.connectionState().getStatementProcessor(); + statementProcessor.rollbackTransaction(); + return readyState; + } + + @Override + protected void assertInitialized() + { + checkState( streamingState != null, "Streaming state not set" ); + checkState( readyState != null, "Ready state not set" ); + super.assertInitialized(); + } +} diff --git a/src/blob/java/org/neo4j/bolt/v5/runtime/TransactionStateMachineV5SPI.java b/blob-feature/src/main/java/org/neo4j/bolt/v3/runtime/TransactionStateMachineV3SPI.java similarity index 51% rename from src/blob/java/org/neo4j/bolt/v5/runtime/TransactionStateMachineV5SPI.java rename to blob-feature/src/main/java/org/neo4j/bolt/v3/runtime/TransactionStateMachineV3SPI.java index 9654c3fb..b317f415 100644 --- a/src/blob/java/org/neo4j/bolt/v5/runtime/TransactionStateMachineV5SPI.java +++ b/blob-feature/src/main/java/org/neo4j/bolt/v3/runtime/TransactionStateMachineV3SPI.java @@ -17,34 +17,60 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -package org.neo4j.bolt.v5.runtime; +package org.neo4j.bolt.v3.runtime; + +import java.time.Clock; +import java.time.Duration; +import java.util.Map; import org.neo4j.bolt.BoltChannel; import org.neo4j.bolt.blob.BoltTransactionListener; -import org.neo4j.bolt.v3.runtime.TransactionStateMachineV3SPI; +import org.neo4j.bolt.runtime.BoltResult; +import org.neo4j.bolt.runtime.BoltResultHandle; +import org.neo4j.bolt.v1.runtime.TransactionStateMachineV1SPI; +import org.neo4j.cypher.internal.javacompat.QueryResultProvider; import org.neo4j.internal.kernel.api.security.LoginContext; import org.neo4j.kernel.api.KernelTransaction; import org.neo4j.kernel.impl.coreapi.InternalTransaction; +import org.neo4j.kernel.impl.query.TransactionalContext; import org.neo4j.kernel.internal.GraphDatabaseAPI; +import org.neo4j.values.virtual.MapValue; -import java.time.Clock; -import java.time.Duration; -import java.util.Map; - -public class TransactionStateMachineV5SPI extends TransactionStateMachineV3SPI +public class TransactionStateMachineV3SPI extends TransactionStateMachineV1SPI { - public TransactionStateMachineV5SPI( GraphDatabaseAPI db, BoltChannel boltChannel, Duration txAwaitDuration, - Clock clock ) - { - super( db, boltChannel, txAwaitDuration, clock ); - } - + //NOTE: blob streaming @Override - protected InternalTransaction beginTransaction( KernelTransaction.Type type, LoginContext loginContext, + protected InternalTransaction beginTransaction(KernelTransaction.Type type, LoginContext loginContext, Duration txTimeout, Map txMetadata ) { InternalTransaction tx = super.beginTransaction( type, loginContext, txTimeout, txMetadata ); BoltTransactionListener.onTransactionCreate( tx ); return tx; } + //NOTE + + public TransactionStateMachineV3SPI( GraphDatabaseAPI db, BoltChannel boltChannel, Duration txAwaitDuration, Clock clock ) + { + super( db, boltChannel, txAwaitDuration, clock ); + } + + @Override + protected BoltResultHandle newBoltResultHandle( String statement, MapValue params, TransactionalContext transactionalContext ) + { + return new BoltResultHandleV3( statement, params, transactionalContext ); + } + + private class BoltResultHandleV3 extends BoltResultHandleV1 + { + BoltResultHandleV3( String statement, MapValue params, TransactionalContext transactionalContext ) + { + super( statement, params, transactionalContext ); + } + + @Override + protected BoltResult newBoltResult( QueryResultProvider result, Clock clock ) + { + return new CypherAdapterStreamV3( result.queryResult(), clock ); + } + } } diff --git a/src/blob/java/org/neo4j/cypher/internal/codegen/ParameterConverter.java b/blob-feature/src/main/java/org/neo4j/cypher/internal/codegen/ParameterConverter.java similarity index 99% rename from src/blob/java/org/neo4j/cypher/internal/codegen/ParameterConverter.java rename to blob-feature/src/main/java/org/neo4j/cypher/internal/codegen/ParameterConverter.java index 975ce108..5b872ae0 100644 --- a/src/blob/java/org/neo4j/cypher/internal/codegen/ParameterConverter.java +++ b/blob-feature/src/main/java/org/neo4j/cypher/internal/codegen/ParameterConverter.java @@ -33,7 +33,7 @@ import java.util.Iterator; import java.util.List; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.graphdb.Node; import org.neo4j.graphdb.Path; import org.neo4j.graphdb.PropertyContainer; diff --git a/src/blob/java/org/neo4j/internal/kernel/api/procs/Neo4jTypes.java b/blob-feature/src/main/java/org/neo4j/internal/kernel/api/procs/Neo4jTypes.java similarity index 99% rename from 
src/blob/java/org/neo4j/internal/kernel/api/procs/Neo4jTypes.java rename to blob-feature/src/main/java/org/neo4j/internal/kernel/api/procs/Neo4jTypes.java index 25ace7e0..8d8b7355 100644 --- a/src/blob/java/org/neo4j/internal/kernel/api/procs/Neo4jTypes.java +++ b/blob-feature/src/main/java/org/neo4j/internal/kernel/api/procs/Neo4jTypes.java @@ -50,6 +50,7 @@ public class Neo4jTypes //NOTE: blob public static final NeoBlobType NTBlob = new NeoBlobType(); + //NOTE public static class NeoBlobType extends AnyType { diff --git a/src/blob/java/org/neo4j/kernel/api/index/ArrayEncoder.java b/blob-feature/src/main/java/org/neo4j/kernel/api/index/ArrayEncoder.java similarity index 99% rename from src/blob/java/org/neo4j/kernel/api/index/ArrayEncoder.java rename to blob-feature/src/main/java/org/neo4j/kernel/api/index/ArrayEncoder.java index 20a5789d..cbadf7fe 100644 --- a/src/blob/java/org/neo4j/kernel/api/index/ArrayEncoder.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/api/index/ArrayEncoder.java @@ -26,7 +26,7 @@ import java.time.ZonedDateTime; import java.util.Base64; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.string.UTF8; import org.neo4j.values.storable.BlobValue; import org.neo4j.values.storable.CoordinateReferenceSystem; diff --git a/src/blob/java/org/neo4j/kernel/impl/api/state/AppendOnlyValuesContainer.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/api/state/AppendOnlyValuesContainer.java similarity index 99% rename from src/blob/java/org/neo4j/kernel/impl/api/state/AppendOnlyValuesContainer.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/api/state/AppendOnlyValuesContainer.java index 887d9f8b..7f0a76c5 100644 --- a/src/blob/java/org/neo4j/kernel/impl/api/state/AppendOnlyValuesContainer.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/api/state/AppendOnlyValuesContainer.java @@ -33,9 +33,9 @@ import java.util.List; import javax.annotation.Nonnull; -import cn.graiph.blob.Blob; -import cn.graiph.blob.BlobIO; -import cn.graiph.blob.BlobWithId; +import cn.pandadb.blob.Blob; +import cn.pandadb.blob.BlobIO; +import cn.pandadb.blob.BlobWithId; import org.neo4j.graphdb.Resource; import org.neo4j.io.ByteUnit; import org.neo4j.kernel.impl.util.collection.Memory; diff --git a/src/blob/java/org/neo4j/kernel/impl/index/schema/Types.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/index/schema/Types.java similarity index 100% rename from src/blob/java/org/neo4j/kernel/impl/index/schema/Types.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/index/schema/Types.java diff --git a/src/blob/java/org/neo4j/kernel/impl/proc/TypeMappers.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/proc/TypeMappers.java similarity index 99% rename from src/blob/java/org/neo4j/kernel/impl/proc/TypeMappers.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/proc/TypeMappers.java index 13377ed6..934954e5 100644 --- a/src/blob/java/org/neo4j/kernel/impl/proc/TypeMappers.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/proc/TypeMappers.java @@ -34,7 +34,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.helpers.collection.Iterables; import org.neo4j.internal.kernel.api.exceptions.ProcedureException; import org.neo4j.internal.kernel.api.procs.DefaultParameterValue; @@ -132,7 +132,8 @@ public TypeMappers( EmbeddedProxySPI proxySPI ) private void registerScalarsAndCollections() { //NOTE: supports blob - 
registerType(Blob.class, TO_BLOB); + registerType( Blob.class, TO_BLOB ); + //NOTE registerType( String.class, TO_STRING ); registerType( long.class, TO_INTEGER ); diff --git a/src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyCreator.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyCreator.java similarity index 96% rename from src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyCreator.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyCreator.java index 1139754f..0739fdc0 100644 --- a/src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyCreator.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyCreator.java @@ -19,23 +19,18 @@ */ package org.neo4j.kernel.impl.storageengine.impl.recordstorage; -import java.util.Iterator; -import java.util.function.Consumer; - -import org.neo4j.kernel.impl.InstanceContext; import org.neo4j.kernel.impl.store.DynamicRecordAllocator; import org.neo4j.kernel.impl.store.PropertyStore; import org.neo4j.kernel.impl.store.PropertyType; import org.neo4j.kernel.impl.store.id.IdSequence; -import org.neo4j.kernel.impl.store.record.DynamicRecord; -import org.neo4j.kernel.impl.store.record.PrimitiveRecord; -import org.neo4j.kernel.impl.store.record.PropertyBlock; -import org.neo4j.kernel.impl.store.record.PropertyRecord; -import org.neo4j.kernel.impl.store.record.Record; +import org.neo4j.kernel.impl.store.record.*; import org.neo4j.kernel.impl.transaction.state.RecordAccess; import org.neo4j.kernel.impl.transaction.state.RecordAccess.RecordProxy; import org.neo4j.values.storable.Value; +import java.util.Iterator; +import java.util.function.Consumer; + public class PropertyCreator { private final DynamicRecordAllocator stringRecordAllocator; @@ -174,7 +169,7 @@ private void removeProperty( PrimitiveRecord primitive, PropertyRecord host, Pro { host.removePropertyBlock( block.getKeyIndexId() ); //on delete - block.getType().onPropertyDelete( InstanceContext.of( this.propertyRecordIdGenerator ), primitive, host, block); + block.getType().onPropertyDelete(primitive, host, block); host.setChanged( primitive ); for ( DynamicRecord record : block.getValueRecords() ) diff --git a/src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyDeleter.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyDeleter.java similarity index 93% rename from src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyDeleter.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyDeleter.java index 1f0ea234..c3698e3d 100644 --- a/src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyDeleter.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/PropertyDeleter.java @@ -18,13 +18,7 @@ * along with this program. If not, see . 
*/ package org.neo4j.kernel.impl.storageengine.impl.recordstorage; - -import org.neo4j.kernel.impl.InstanceContext; -import org.neo4j.kernel.impl.store.record.DynamicRecord; -import org.neo4j.kernel.impl.store.record.PrimitiveRecord; -import org.neo4j.kernel.impl.store.record.PropertyBlock; -import org.neo4j.kernel.impl.store.record.PropertyRecord; -import org.neo4j.kernel.impl.store.record.Record; +import org.neo4j.kernel.impl.store.record.*; import org.neo4j.kernel.impl.transaction.state.RecordAccess; import org.neo4j.kernel.impl.transaction.state.RecordAccess.RecordProxy; @@ -50,7 +44,7 @@ public void deletePropertyChain( PrimitiveRecord primitive, PropertyRecord propRecord = propertyChange.forChangingData(); propRecord.forEach( block -> { - block.getType().onPropertyDelete( InstanceContext.of(propertyRecords), primitive, propRecord, block ); + block.getType().onPropertyDelete( primitive, propRecord, block ); } ); deletePropertyRecordIncludingValueRecords( propRecord ); @@ -138,7 +132,7 @@ private

void removeProperty( RecordProxy pri + propertyId + "]" ); } - block.getType().onPropertyDelete( InstanceContext.of( propertyRecords ), primitive, propRecord, block); + block.getType().onPropertyDelete( primitive, propRecord, block); for ( DynamicRecord valueRecord : block.getValueRecords() ) { diff --git a/src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/RecordPropertyCursor.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/RecordPropertyCursor.java similarity index 98% rename from src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/RecordPropertyCursor.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/RecordPropertyCursor.java index fccf4f55..4399aa74 100644 --- a/src/blob/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/RecordPropertyCursor.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/storageengine/impl/recordstorage/RecordPropertyCursor.java @@ -22,7 +22,6 @@ import java.nio.ByteBuffer; import org.neo4j.io.pagecache.PageCursor; -import org.neo4j.kernel.impl.InstanceContext; import org.neo4j.kernel.impl.blob.StoreBlobIO; import org.neo4j.kernel.impl.store.GeometryType; import org.neo4j.kernel.impl.store.LongerShortString; @@ -241,7 +240,7 @@ private Value readValue() case TEMPORAL: return temporalValue(); case BLOB: - return StoreBlobIO.readBlobValue( InstanceContext.of( read ), this.getBlocks()); + return StoreBlobIO.readBlobValue( this.getBlocks()); default: throw new IllegalStateException( "Unsupported PropertyType: " + type.name() ); } @@ -407,6 +406,6 @@ private ArrayValue array( RecordPropertyCursor cursor, long reference, PageCurso { ByteBuffer buffer = cursor.buffer = read.loadArray( reference, cursor.buffer, page ); buffer.flip(); - return PropertyStore.readArrayFromBuffer( InstanceContext.of( read ), buffer ); + return PropertyStore.readArrayFromBuffer( buffer ); } } diff --git a/src/blob/java/org/neo4j/kernel/impl/store/DynamicArrayStore.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/store/DynamicArrayStore.java similarity index 96% rename from src/blob/java/org/neo4j/kernel/impl/store/DynamicArrayStore.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/store/DynamicArrayStore.java index a13aa6c9..378eb0b5 100644 --- a/src/blob/java/org/neo4j/kernel/impl/store/DynamicArrayStore.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/store/DynamicArrayStore.java @@ -30,12 +30,10 @@ import java.time.ZonedDateTime; import java.util.Collection; -import cn.graiph.blob.Blob; -import cn.graiph.util.ContextMap; +import cn.pandadb.blob.Blob; import org.neo4j.helpers.collection.Pair; import org.neo4j.io.pagecache.PageCache; import org.neo4j.kernel.configuration.Config; -import org.neo4j.kernel.impl.InstanceContext; import org.neo4j.kernel.impl.blob.StoreBlobIO; import org.neo4j.kernel.impl.store.format.Capability; import org.neo4j.kernel.impl.store.format.RecordFormats; @@ -207,7 +205,7 @@ private static void allocateFromBlob( Collection target, Blob[] a for ( int i = 0; i < array.length; i++ ) { Blob blob = array[i]; - byte[] bytes = StoreBlobIO.saveAndEncodeBlobAsByteArray( InstanceContext.of( recordAllocator ), blob ); + byte[] bytes = StoreBlobIO.saveAndEncodeBlobAsByteArray( blob ); blobsAsBytes[i] = bytes; totalBytesRequired += 4/*byte[].length*/ + bytes.length; } @@ -346,7 +344,7 @@ else if ( type.equals( DurationValue.class ) ) } } - public static Value getRightArray( ContextMap ic, Pair data ) + public 
static Value getRightArray( Pair data ) { byte[] header = data.first(); byte[] bArray = data.other(); @@ -373,18 +371,18 @@ else if ( typeId == PropertyType.BLOB.intValue() ) ByteBuffer headerBuffer = ByteBuffer.wrap( header, 1/*skip the type*/, header.length - 1 ); int arrayLength = headerBuffer.getInt(); ByteBuffer dataBuffer = ByteBuffer.wrap( bArray ); - Blob[] result = StoreBlobIO.readBlobArray( ic, dataBuffer, arrayLength ); + Blob[] result = StoreBlobIO.readBlobArray( dataBuffer, arrayLength ); return Values.blobArray( result ); } else if ( typeId == PropertyType.GEOMETRY.intValue() ) { GeometryType.GeometryHeader geometryHeader = GeometryType.GeometryHeader.fromArrayHeaderBytes(header); - return GeometryType.decodeGeometryArray( ic, geometryHeader, bArray ); + return GeometryType.decodeGeometryArray( geometryHeader, bArray ); } else if ( typeId == PropertyType.TEMPORAL.intValue() ) { TemporalType.TemporalHeader temporalHeader = TemporalType.TemporalHeader.fromArrayHeaderBytes(header); - return TemporalType.decodeTemporalArray( ic, temporalHeader, bArray ); + return TemporalType.decodeTemporalArray( temporalHeader, bArray ); } else { @@ -410,6 +408,6 @@ else if ( typeId == PropertyType.TEMPORAL.intValue() ) public Object getArrayFor( Iterable records ) { - return getRightArray( InstanceContext.of( this ), readFullByteArray( records, PropertyType.ARRAY ) ).asObject(); + return getRightArray( readFullByteArray( records, PropertyType.ARRAY ) ).asObject(); } } diff --git a/src/blob/java/org/neo4j/kernel/impl/store/PropertyStore.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/store/PropertyStore.java similarity index 97% rename from src/blob/java/org/neo4j/kernel/impl/store/PropertyStore.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/store/PropertyStore.java index fb21dd4a..c2180369 100644 --- a/src/blob/java/org/neo4j/kernel/impl/store/PropertyStore.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/store/PropertyStore.java @@ -29,14 +29,12 @@ import java.util.List; import java.util.function.ToIntFunction; -import cn.graiph.blob.Blob; -import cn.graiph.util.ContextMap; +import cn.pandadb.blob.Blob; import org.neo4j.helpers.collection.Iterables; import org.neo4j.helpers.collection.Pair; import org.neo4j.io.pagecache.PageCache; import org.neo4j.io.pagecache.PageCursor; import org.neo4j.kernel.configuration.Config; -import org.neo4j.kernel.impl.InstanceContext; import org.neo4j.kernel.impl.blob.StoreBlobIO; import org.neo4j.kernel.impl.store.format.Capability; import org.neo4j.kernel.impl.store.format.RecordFormats; @@ -632,7 +630,7 @@ public void writeDateTime( long epochSecondUTC, int nano, String zoneId ) throws @Override public void writeBlob( Blob blob ) { - StoreBlobIO.saveBlob( InstanceContext.of( stringAllocator ), blob, this.keyId, this.block ); + StoreBlobIO.saveBlob( blob, this.keyId, this.block ); } } @@ -678,7 +676,7 @@ Value getArrayFor( PropertyBlock propertyBlock ) private Value getArrayFor( Iterable records ) { - return getRightArray( InstanceContext.of( this ), arrayStore.readFullByteArray( records, PropertyType.ARRAY ) ); + return getRightArray( arrayStore.readFullByteArray( records, PropertyType.ARRAY ) ); } @Override @@ -720,7 +718,7 @@ public ToIntFunction newValueEncodedSizeCalculator() return new PropertyValueRecordSizeCalculator( this ); } - public static ArrayValue readArrayFromBuffer( ContextMap ic, ByteBuffer buffer ) + public static ArrayValue readArrayFromBuffer( ByteBuffer buffer ) { if ( buffer.limit() <= 0 ) { @@ -748,7 
+746,7 @@ public static ArrayValue readArrayFromBuffer( ContextMap ic, ByteBuffer buffer ) else if ( typeId == PropertyType.BLOB.intValue() ) { int arrayLength = buffer.getInt(); - Blob[] result = StoreBlobIO.readBlobArray( ic, buffer, arrayLength ); + Blob[] result = StoreBlobIO.readBlobArray( buffer, arrayLength ); return Values.blobArray(result); } else if ( typeId == PropertyType.GEOMETRY.intValue() ) @@ -756,14 +754,14 @@ else if ( typeId == PropertyType.GEOMETRY.intValue() ) GeometryType.GeometryHeader header = GeometryType.GeometryHeader.fromArrayHeaderByteBuffer( buffer ); byte[] byteArray = new byte[buffer.limit() - buffer.position()]; buffer.get( byteArray ); - return GeometryType.decodeGeometryArray( ic, header, byteArray ); + return GeometryType.decodeGeometryArray( header, byteArray ); } else if ( typeId == PropertyType.TEMPORAL.intValue() ) { TemporalType.TemporalHeader header = TemporalType.TemporalHeader.fromArrayHeaderByteBuffer( buffer ); byte[] byteArray = new byte[buffer.limit() - buffer.position()]; buffer.get( byteArray ); - return TemporalType.decodeTemporalArray( ic, header, byteArray ); + return TemporalType.decodeTemporalArray( header, byteArray ); } else { diff --git a/src/blob/java/org/neo4j/kernel/impl/store/PropertyType.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/store/PropertyType.java similarity index 92% rename from src/blob/java/org/neo4j/kernel/impl/store/PropertyType.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/store/PropertyType.java index 747ab05c..9076591d 100644 --- a/src/blob/java/org/neo4j/kernel/impl/store/PropertyType.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/store/PropertyType.java @@ -21,9 +21,6 @@ import java.util.Arrays; import java.util.List; - -import cn.graiph.util.ContextMap; -import org.neo4j.kernel.impl.InstanceContext; import org.neo4j.kernel.impl.blob.StoreBlobIO; import org.neo4j.kernel.impl.store.format.standard.PropertyRecordFormat; import org.neo4j.kernel.impl.store.record.DynamicRecord; @@ -184,15 +181,14 @@ private byte[] headOf( byte[] bytes, int length ) } @Override - public void onPropertyDelete( ContextMap ic, PrimitiveRecord primitive, PropertyRecord propRecord, PropertyBlock block ) + public void onPropertyDelete( PrimitiveRecord primitive, PropertyRecord propRecord, PropertyBlock block ) { List values = block.getValueRecords(); byte itemType = values.get( 0 ).getData( )[0]; if ( itemType == BLOB.byteValue() ) { - BlobArray value = (BlobArray) DynamicArrayStore.getRightArray(ic, - AbstractDynamicStore.readFullByteArrayFromHeavyRecords( block.getValueRecords(), PropertyType.ARRAY ) ); - StoreBlobIO.deleteBlobArrayProperty( ic, value ); + BlobArray value = (BlobArray) DynamicArrayStore.getRightArray( AbstractDynamicStore.readFullByteArrayFromHeavyRecords( block.getValueRecords(), PropertyType.ARRAY ) ); + StoreBlobIO.deleteBlobArrayProperty( value ); } } }, @@ -257,7 +253,7 @@ public int calculateNumberOfBlocksUsed( long firstBlock ) @Override public Value value( PropertyBlock block, PropertyStore store ) { - return StoreBlobIO.readBlobValue( InstanceContext.of( store ), block ); + return StoreBlobIO.readBlobValue( block ); } @Override @@ -267,9 +263,9 @@ public int calculateNumberOfBlocksUsed( long firstBlock ) } @Override - public void onPropertyDelete( ContextMap ic, PrimitiveRecord primitive, PropertyRecord propRecord, PropertyBlock block ) + public void onPropertyDelete( PrimitiveRecord primitive, PropertyRecord propRecord, PropertyBlock block ) { - 
StoreBlobIO.deleteBlobProperty( ic, primitive, propRecord, block ); + StoreBlobIO.deleteBlobProperty( primitive, propRecord, block ); } }; @@ -388,7 +384,7 @@ public byte[] readDynamicRecordHeader( byte[] recordBytes ) throw new UnsupportedOperationException(); } - public void onPropertyDelete( ContextMap ic, PrimitiveRecord primitive, PropertyRecord propRecord, PropertyBlock block ) + public void onPropertyDelete( PrimitiveRecord primitive, PropertyRecord propRecord, PropertyBlock block ) { //do nothing } diff --git a/src/blob/java/org/neo4j/kernel/impl/util/BaseToObjectValueWriter.java b/blob-feature/src/main/java/org/neo4j/kernel/impl/util/BaseToObjectValueWriter.java similarity index 99% rename from src/blob/java/org/neo4j/kernel/impl/util/BaseToObjectValueWriter.java rename to blob-feature/src/main/java/org/neo4j/kernel/impl/util/BaseToObjectValueWriter.java index 76f3bfba..6aeceecd 100644 --- a/src/blob/java/org/neo4j/kernel/impl/util/BaseToObjectValueWriter.java +++ b/blob-feature/src/main/java/org/neo4j/kernel/impl/util/BaseToObjectValueWriter.java @@ -33,7 +33,7 @@ import java.util.Iterator; import java.util.List; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.graphdb.Node; import org.neo4j.graphdb.Path; import org.neo4j.graphdb.PropertyContainer; diff --git a/src/blob/java/org/neo4j/values/storable/BlobArray.java b/blob-feature/src/main/java/org/neo4j/values/storable/BlobArray.java similarity index 96% rename from src/blob/java/org/neo4j/values/storable/BlobArray.java rename to blob-feature/src/main/java/org/neo4j/values/storable/BlobArray.java index e0898b0f..db4a2aa6 100644 --- a/src/blob/java/org/neo4j/values/storable/BlobArray.java +++ b/blob-feature/src/main/java/org/neo4j/values/storable/BlobArray.java @@ -20,7 +20,7 @@ package org.neo4j.values.storable; import org.neo4j.values.AnyValue; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.values.ValueMapper; public class BlobArray extends NonPrimitiveArray @@ -61,7 +61,7 @@ public AnyValue value( int offset ) @Override public boolean equals( Value other ) { - return _support._equals( other ); + return _support.internalEquals( other ); } @Override diff --git a/src/blob/java/org/neo4j/values/storable/ValueWriter.java b/blob-feature/src/main/java/org/neo4j/values/storable/ValueWriter.java similarity index 99% rename from src/blob/java/org/neo4j/values/storable/ValueWriter.java rename to blob-feature/src/main/java/org/neo4j/values/storable/ValueWriter.java index ec54bf1e..acb1566c 100644 --- a/src/blob/java/org/neo4j/values/storable/ValueWriter.java +++ b/blob-feature/src/main/java/org/neo4j/values/storable/ValueWriter.java @@ -19,7 +19,7 @@ */ package org.neo4j.values.storable; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import java.nio.charset.StandardCharsets; import java.time.LocalDate; diff --git a/src/blob/java/org/neo4j/values/storable/Values.java b/blob-feature/src/main/java/org/neo4j/values/storable/Values.java similarity index 99% rename from src/blob/java/org/neo4j/values/storable/Values.java rename to blob-feature/src/main/java/org/neo4j/values/storable/Values.java index 6790ffe4..578383a7 100644 --- a/src/blob/java/org/neo4j/values/storable/Values.java +++ b/blob-feature/src/main/java/org/neo4j/values/storable/Values.java @@ -35,7 +35,7 @@ import java.util.List; import java.util.Objects; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.graphdb.spatial.CRS; import org.neo4j.graphdb.spatial.Point; diff --git 
a/src/blob/java/org/neo4j/values/utils/PrettyPrinter.java b/blob-feature/src/main/java/org/neo4j/values/utils/PrettyPrinter.java similarity index 99% rename from src/blob/java/org/neo4j/values/utils/PrettyPrinter.java rename to blob-feature/src/main/java/org/neo4j/values/utils/PrettyPrinter.java index 33cb3cc4..7cae775b 100644 --- a/src/blob/java/org/neo4j/values/utils/PrettyPrinter.java +++ b/blob-feature/src/main/java/org/neo4j/values/utils/PrettyPrinter.java @@ -28,7 +28,7 @@ import java.util.Arrays; import java.util.Deque; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import org.neo4j.values.AnyValueWriter; import org.neo4j.values.storable.CoordinateReferenceSystem; import org.neo4j.values.storable.TextArray; diff --git a/blob-feature/src/main/scala/cn/pandadb/blob/module.scala b/blob-feature/src/main/scala/cn/pandadb/blob/module.scala new file mode 100644 index 00000000..3c82d0ad --- /dev/null +++ b/blob-feature/src/main/scala/cn/pandadb/blob/module.scala @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +package cn.pandadb.blob + +import java.io.File + +import cn.pandadb.context._ +import cn.pandadb.util.ConfigUtils._ +import cn.pandadb.util._ +import org.neo4j.kernel.impl.blob.{BlobStorage, DefaultBlobFunctions} +import org.neo4j.kernel.impl.proc.Procedures + +class BlobStorageModule extends PandaModule { + override def init(ctx: PandaModuleContext): Unit = { + //GraphDatabaseStartedEvent + PandaEventHub.trigger({ + case GraphDatabaseStartedEvent(proceduresService, storeDir, neo4jConf, databaseInfo) => + registerProcedure(proceduresService, classOf[DefaultBlobFunctions]); + }) + + val conf = ctx.configuration; + val blobStorage = BlobStorage.create(conf); + BlobStorageContext.bindBlobStorage(blobStorage); + BlobStorageContext.bindBlobStorageDir(conf.getAsFile("blob.storage.file.dir", ctx.storeDir, new File(ctx.storeDir, "/blob"))); + } + + private def registerProcedure(proceduresService: Procedures, procedures: Class[_]*) { + for (procedure <- procedures) { + proceduresService.registerProcedure(procedure); + proceduresService.registerFunction(procedure); + } + } + + override def close(ctx: PandaModuleContext): Unit = { + BlobStorageContext.blobStorage.close(ctx); + } + + override def start(ctx: PandaModuleContext): Unit = { + BlobStorageContext.blobStorage.start(ctx); + } +} + +object BlobStorageContext extends ContextMap { + def blobStorage: BlobStorage = get[BlobStorage](); + + def bindBlobStorage(blobStorage: BlobStorage): Unit = put[BlobStorage](blobStorage) + + def getDefaultBlobValueStorageClass: Option[String] = getOption("default-blob-value-storage-class") + + def bindBlobStorageDir(dir: File): Unit = put("blob.storage.file.dir", dir) + + def blobStorageDir: File = get("blob.storage.file.dir"); +} \ No newline at end of file diff --git 
a/src/graiph-database/scala/cn/graiph/db/cypher.scala b/blob-feature/src/main/scala/cn/pandadb/cypherplus/cypher_expr.scala similarity index 83% rename from src/graiph-database/scala/cn/graiph/db/cypher.scala rename to blob-feature/src/main/scala/cn/pandadb/cypherplus/cypher_expr.scala index e23bf2b2..5a1df0a8 100644 --- a/src/graiph-database/scala/cn/graiph/db/cypher.scala +++ b/blob-feature/src/main/scala/cn/pandadb/cypherplus/cypher_expr.scala @@ -1,8 +1,7 @@ -package cn.graiph.db +package cn.pandadb.cypherplus -import cn.graiph.util.{ContextMap, ReflectUtils} -import cn.graiph.{CustomPropertyProvider, ValueMatcher} -import ReflectUtils._ +import cn.pandadb.blob.BlobStorageContext +import org.neo4j.cypher.internal.runtime.interpreted.commands.AstNode import org.neo4j.cypher.internal.runtime.interpreted.commands.convert.{ExpressionConverters, ExtendedCommandExpr} import org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.{Expression => CommandExpression} import org.neo4j.cypher.internal.runtime.interpreted.commands.predicates.Predicate @@ -13,20 +12,13 @@ import org.neo4j.cypher.internal.runtime.interpreted.{ExecutionContext, UpdateCo import org.neo4j.cypher.internal.v3_5.ast.semantics._ import org.neo4j.cypher.internal.v3_5.expressions.Expression.SemanticContext import org.neo4j.cypher.internal.v3_5.expressions._ -import org.neo4j.cypher.internal.v3_5.parser.Expressions import org.neo4j.cypher.internal.v3_5.util.InputPosition import org.neo4j.cypher.internal.v3_5.util.attribution.Id import org.neo4j.cypher.internal.v3_5.util.symbols._ -import org.neo4j.cypher.internal.v3_5.{expressions => ast} -import org.neo4j.kernel.configuration.Config import org.neo4j.values.AnyValue import org.neo4j.values.storable.{Value, _} import org.neo4j.values.virtual.VirtualValues -import org.parboiled.scala._ -/** - * Created by bluejoe on 2019/7/16. 
- */ case class AlgoNameWithThresholdExpr(algorithm: Option[String], threshold: Option[Double])(val position: InputPosition) extends Expression with ExtendedExpr { @@ -45,7 +37,7 @@ case class AlgoNameWithThresholdExpr(algorithm: Option[String], threshold: Optio case class CustomPropertyExpr(map: Expression, propertyKey: PropertyKeyName)(val position: InputPosition) extends LogicalProperty with ExtendedExpr with ExtendedCommandExpr { - override def asCanonicalStringVal = s"${map.asCanonicalStringVal}.${propertyKey.asCanonicalStringVal}" + override def asCanonicalStringVal: String = s"${map.asCanonicalStringVal}.${propertyKey.asCanonicalStringVal}" override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = CustomPropertyCommand(self.toCommandExpression(id, map), PropertyKey(this.propertyKey.name)) @@ -62,7 +54,7 @@ case class SemanticLikeExpr(lhs: Expression, ant: Option[AlgoNameWithThresholdEx TypeSignature(argumentTypes = Vector(CTAny, CTAny), outputType = CTBoolean) ) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticLikeCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) @@ -81,7 +73,7 @@ case class SemanticUnlikeExpr(lhs: Expression, ant: Option[AlgoNameWithThreshold override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticUnlikeCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def check(ctx: SemanticContext): SemanticCheck = SemanticExpressionCheck.check(ctx, this.arguments) chain @@ -97,7 +89,7 @@ case class SemanticCompareExpr(lhs: Expression, ant: Option[AlgoNameWithThreshol override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticCompareCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def check(ctx: SemanticContext): SemanticCheck = SemanticExpressionCheck.check(ctx, this.arguments) chain @@ -113,7 +105,7 @@ case class SemanticSetCompareExpr(lhs: Expression, ant: Option[AlgoNameWithThres override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticSetCompareCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def check(ctx: SemanticContext): SemanticCheck = SemanticExpressionCheck.check(ctx, this.arguments) chain @@ -129,7 +121,7 @@ case class SemanticContainExpr(lhs: Expression, ant: Option[AlgoNameWithThreshol override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticContainCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def check(ctx: SemanticContext): SemanticCheck = SemanticExpressionCheck.check(ctx, 
this.arguments) chain @@ -145,7 +137,7 @@ case class SemanticInExpr(lhs: Expression, ant: Option[AlgoNameWithThresholdExpr override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticInCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def check(ctx: SemanticContext): SemanticCheck = SemanticExpressionCheck.check(ctx, this.arguments) chain @@ -161,7 +153,7 @@ case class SemanticContainSetExpr(lhs: Expression, ant: Option[AlgoNameWithThres override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticContainSetCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def check(ctx: SemanticContext): SemanticCheck = SemanticExpressionCheck.check(ctx, this.arguments) chain @@ -174,7 +166,7 @@ case class SemanticSetInExpr(lhs: Expression, ant: Option[AlgoNameWithThresholdE TypeSignature(argumentTypes = Vector(CTAny, CTAny), outputType = CTBoolean) ) - override def canonicalOperatorSymbol = this.getClass.getSimpleName + override def canonicalOperatorSymbol: String = this.getClass.getSimpleName override def makeCommand(id: Id, self: ExpressionConverters): CommandExpression = SemanticSetInCommand(self.toCommandExpression(id, this.lhs), this.ant, self.toCommandExpression(id, this.rhs)) @@ -187,19 +179,11 @@ case class SemanticSetInExpr(lhs: Expression, ant: Option[AlgoNameWithThresholdE /////////////commands///////////// case class QueryStateEx(state: QueryState) { - def getInstanceContext(): ContextMap = { - val config = state match { - case x: UpdateCountingQueryContext => state._get("query.inner.inner.transactionalContext.tc.graph.graph.config") - case _ => state._get("query.inner.transactionalContext.tc.graph.graph.config") - } - config.asInstanceOf[Config].getInstanceContext - } - def getCustomPropertyProvider(): CustomPropertyProvider = - getInstanceContext().get[CustomPropertyProvider]() + CypherPlusContext.customPropertyProvider def getValueMatcher(): ValueMatcher = - getInstanceContext().get[ValueMatcher]() + CypherPlusContext.valueMatcher } case class CustomPropertyCommand(mapExpr: CommandExpression, propertyKey: KeyToken) @@ -216,15 +200,15 @@ case class CustomPropertyCommand(mapExpr: CommandExpression, propertyKey: KeyTok pv.map(Values.unsafeOf(_, true)).getOrElse(Values.NO_VALUE) } - def rewrite(f: (CommandExpression) => CommandExpression) = f(CustomPropertyCommand(mapExpr.rewrite(f), propertyKey.rewrite(f))) + def rewrite(f: (CommandExpression) => CommandExpression): CommandExpression = f(CustomPropertyCommand(mapExpr.rewrite(f), propertyKey.rewrite(f))) - override def children = Seq(mapExpr, propertyKey) + override def children: Seq[AstNode[_]] = Seq(mapExpr, propertyKey) - def arguments = Seq(mapExpr) + def arguments: Seq[CommandExpression] = Seq(mapExpr) - def symbolTableDependencies = mapExpr.symbolTableDependencies + def symbolTableDependencies: Set[String] = mapExpr.symbolTableDependencies - override def toString = s"$mapExpr.${propertyKey.name}" + override def toString: String = s"$mapExpr.${propertyKey.name}" } trait SemanticOperatorSupport { @@ -249,15 +233,15 @@ trait SemanticOperatorSupport { override def toString: String = 
lhsExpr.toString() + this.getOperatorString + rhsExpr.toString() - def containsIsNull = false + def containsIsNull: Boolean = false - def rewrite(f: (CommandExpression) => CommandExpression) = f(rhsExpr.rewrite(f) match { + def rewrite(f: (CommandExpression) => CommandExpression): CommandExpression = f(rhsExpr.rewrite(f) match { case other => rewriteMethod(lhsExpr.rewrite(f), ant, other) }) - def arguments = Seq(lhsExpr, rhsExpr) + def arguments: Seq[CommandExpression] = Seq(lhsExpr, rhsExpr) - def symbolTableDependencies = lhsExpr.symbolTableDependencies ++ rhsExpr.symbolTableDependencies + def symbolTableDependencies: Set[String] = lhsExpr.symbolTableDependencies ++ rhsExpr.symbolTableDependencies } case class SemanticLikeCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -276,7 +260,8 @@ case class SemanticLikeCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameW override def getOperatorString: String = "~:" - override def rewriteMethod = SemanticLikeCommand(_, _, _)(converter) + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticLikeCommand(_, _, _)(converter) } @@ -290,7 +275,8 @@ case class SemanticUnlikeCommand(lhsExpr: CommandExpression, ant: Option[AlgoNam override def getOperatorString: String = "!:" - override def rewriteMethod = SemanticUnlikeCommand(_, _, _)(converter) + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticUnlikeCommand(_, _, _)(converter) } case class SemanticContainCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -310,7 +296,8 @@ case class SemanticContainCommand(lhsExpr: CommandExpression, ant: Option[AlgoNa override def getOperatorString: String = ">:" - override def rewriteMethod = SemanticContainCommand(_, _, _)(converter) + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticContainCommand(_, _, _)(converter) } case class SemanticInCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -322,7 +309,8 @@ case class SemanticInCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWit override def getOperatorString: String = "<:" - override def rewriteMethod = SemanticInCommand(_, _, _)(converter) + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticInCommand(_, _, _)(converter) } case class SemanticContainSetCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -342,7 +330,8 @@ case class SemanticContainSetCommand(lhsExpr: CommandExpression, ant: Option[Alg override def getOperatorString: String = ">>:" - override def rewriteMethod = SemanticContainSetCommand(_, _, _)(converter) + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticContainSetCommand(_, _, _)(converter) } case class SemanticSetInCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -354,7 +343,8 @@ case class SemanticSetInCommand(lhsExpr: CommandExpression, ant: Option[AlgoName override def getOperatorString: String = "<<:" - override def rewriteMethod = SemanticSetInCommand(_, _, _)(converter) + override def 
rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticSetInCommand(_, _, _)(converter) } case class SemanticCompareCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -373,7 +363,8 @@ case class SemanticCompareCommand(lhsExpr: CommandExpression, ant: Option[AlgoNa override def getOperatorString: String = "::" - override def rewriteMethod = SemanticCompareCommand(_, _, _)(converter) + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticCompareCommand(_, _, _)(converter) } case class SemanticSetCompareCommand(lhsExpr: CommandExpression, ant: Option[AlgoNameWithThresholdExpr], rhsExpr: CommandExpression) @@ -396,9 +387,6 @@ case class SemanticSetCompareCommand(lhsExpr: CommandExpression, ant: Option[Alg override def getOperatorString: String = ":::" - override def rewriteMethod = SemanticSetCompareCommand(_, _, _)(converter) -} - -class InvalidSemanticOperatorException(compared: AnyValue) extends RuntimeException { - + override def rewriteMethod: (CommandExpression, Option[AlgoNameWithThresholdExpr], CommandExpression) => CommandExpression = + SemanticSetCompareCommand(_, _, _)(converter) } \ No newline at end of file diff --git a/blob-feature/src/main/scala/cn/pandadb/cypherplus/module.scala b/blob-feature/src/main/scala/cn/pandadb/cypherplus/module.scala new file mode 100644 index 00000000..33d38542 --- /dev/null +++ b/blob-feature/src/main/scala/cn/pandadb/cypherplus/module.scala @@ -0,0 +1,61 @@ +package cn.pandadb.cypherplus + +import java.io.File + +import cn.pandadb.blob.CypherPluginRegistry +import cn.pandadb.util._ +import org.springframework.context.support.FileSystemXmlApplicationContext + +class CypherPlusModule extends PandaModule with Logging { + override def init(ctx: PandaModuleContext): Unit = { + val conf = ctx.configuration; + val cypherPluginRegistry = conf.getRaw("blob.plugins.conf").map(x => { + val xml = new File(x); + + val path = + if (xml.isAbsolute) { + xml.getPath + } + else { + val configFilePath = conf.getRaw("config.file.path") + if (configFilePath.isDefined) { + new File(new File(configFilePath.get).getParentFile, x).getAbsoluteFile.getCanonicalPath + } + else { + xml.getAbsoluteFile.getCanonicalPath + } + } + + logger.info(s"loading semantic plugins: $path"); + val appctx = new FileSystemXmlApplicationContext("file:" + path); + appctx.getBean[CypherPluginRegistry](classOf[CypherPluginRegistry]); + }).getOrElse { + logger.info(s"semantic plugins not loaded: blob.plugins.conf=null"); + new CypherPluginRegistry() + } + + val customPropertyProvider = cypherPluginRegistry.createCustomPropertyProvider(conf); + val valueMatcher = cypherPluginRegistry.createValueComparatorRegistry(conf); + + CypherPlusContext.bindCustomPropertyProvider(customPropertyProvider); + CypherPlusContext.bindValueMatcher(valueMatcher); + } + + override def close(ctx: PandaModuleContext): Unit = { + + } + + override def start(ctx: PandaModuleContext): Unit = { + + } +} + +object CypherPlusContext extends ContextMap { + def customPropertyProvider: CustomPropertyProvider = get[CustomPropertyProvider](); + + def bindCustomPropertyProvider(customPropertyProvider: CustomPropertyProvider): Unit = put[CustomPropertyProvider](customPropertyProvider); + + def valueMatcher: ValueMatcher = get[ValueMatcher](); + + def bindValueMatcher(valueMatcher: ValueMatcher): Unit = put[ValueMatcher](valueMatcher); 
+} \ No newline at end of file diff --git a/src/blob/scala/org/neo4j/bolt/blob/messages.scala b/blob-feature/src/main/scala/org/neo4j/bolt/blob/messages.scala similarity index 91% rename from src/blob/scala/org/neo4j/bolt/blob/messages.scala rename to blob-feature/src/main/scala/org/neo4j/bolt/blob/messages.scala index 986551f3..fed50bb6 100644 --- a/src/blob/scala/org/neo4j/bolt/blob/messages.scala +++ b/blob-feature/src/main/scala/org/neo4j/bolt/blob/messages.scala @@ -21,7 +21,8 @@ package org.neo4j.bolt.blob import java.io.{ByteArrayInputStream, InputStream} -import cn.graiph.blob.{MimeType, BlobMessageSignature, Blob, InputStreamSource} +import cn.pandadb.blob.{MimeType, BlobMessageSignature, Blob, InputStreamSource} +import cn.pandadb.util.PandaException import org.neo4j.bolt.messaging.Neo4jPack.Unpacker import org.neo4j.bolt.messaging.{RequestMessage, RequestMessageDecoder} import org.neo4j.bolt.runtime.BoltResult.Visitor @@ -41,10 +42,10 @@ trait RequestMessageHandler { class GetBlobMessage(val blobId: String) extends RequestMessage with RequestMessageHandler { override def safeToProcessInAnyState(): Boolean = false; - override def toString = s"GET_BLOB(id=$blobId)"; + override def toString: String = s"GET_BLOB(id=$blobId)"; @throws[Exception] - override def accepts(context: StateMachineContext) = { + override def accepts(context: StateMachineContext): Unit = { val opt: Option[Blob] = TransactionalBlobCache.get(blobId) if (opt.isDefined) { context.connectionState.onRecords(new BoltResult() { @@ -86,7 +87,6 @@ class GetBlobMessage(val blobId: String) extends RequestMessage with RequestMess override def close(): Unit = { //TODO - println("server side: CLOSE!!!!!!!!"); } }, true); } @@ -107,7 +107,7 @@ class GetBlobMessageDecoder(val responseHandler: BoltResponseHandler) extends Re } class InvalidBlobHandleException(blobId: String) - extends RuntimeException(s"invalid blob handle: $blobId, make sure it is within an active transaction") { + extends PandaException(s"invalid blob handle: $blobId, make sure it is within an active transaction") { } diff --git a/src/blob/scala/org/neo4j/bolt/blob/utils.scala b/blob-feature/src/main/scala/org/neo4j/bolt/blob/utils.scala similarity index 91% rename from src/blob/scala/org/neo4j/bolt/blob/utils.scala rename to blob-feature/src/main/scala/org/neo4j/bolt/blob/utils.scala index f67adbb8..9a61ca12 100644 --- a/src/blob/scala/org/neo4j/bolt/blob/utils.scala +++ b/blob-feature/src/main/scala/org/neo4j/bolt/blob/utils.scala @@ -21,8 +21,8 @@ package org.neo4j.bolt.blob import java.util.concurrent.atomic.AtomicInteger -import cn.graiph.blob.{BlobIO, BlobId, Blob, BlobEntry} -import cn.graiph.util.{StreamUtils, ReflectUtils} +import cn.pandadb.blob.{BlobIO, BlobId, Blob, BlobEntry} +import cn.pandadb.util.{StreamUtils, ReflectUtils} import org.apache.commons.codec.digest.DigestUtils import ReflectUtils._ import org.neo4j.kernel.api.KernelTransaction @@ -103,9 +103,9 @@ object BoltTransactionListener { private def transactionId(kt: TopLevelTransaction) = "" + kt.hashCode(); - def currentTopLevelTransaction() = _local.get(); + def currentTopLevelTransaction(): TopLevelTransaction = _local.get(); - def currentTopLevelTransactionId() = transactionId(_local.get()); + def currentTopLevelTransactionId(): String = transactionId(_local.get()); def onTransactionCreate(tx: InternalTransaction): Unit = { tx match { @@ -120,9 +120,7 @@ object BoltTransactionListener { } }) - case _ => { - - } + case _ => } } } @@ -161,17 +159,11 @@ object TransactionalBlobCache { 
bid; }; - def dump(): Unit = { - println("<<<<<<<<<<<<<<<<<") - _blobCache.foreach(println); - println(">>>>>>>>>>>>>>>>>") - } - def get(bid: String): Option[Blob] = { _blobCache.get(bid).map(_.blob) }; - def invalidate(transactionId: String) = { + def invalidate(transactionId: String): Unit = { _blobCache --= _blobCache.filter(_._2.transactionId.equals(transactionId)).map(_._1); }; } \ No newline at end of file diff --git a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/CastSupport.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/CastSupport.scala similarity index 99% rename from src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/CastSupport.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/CastSupport.scala index 25a98118..5a480742 100644 --- a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/CastSupport.scala +++ b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/CastSupport.scala @@ -21,7 +21,7 @@ package org.neo4j.cypher.internal.runtime.interpreted import java.time._ import java.time.temporal.TemporalAmount -import cn.graiph.blob.Blob +import cn.pandadb.blob.Blob import org.neo4j.cypher.internal.v3_5.util.CypherTypeException import org.neo4j.graphdb.spatial.Point import org.neo4j.values.storable.{ArrayValue, _} diff --git a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/TransactionBoundPlanContext.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/TransactionBoundPlanContext.scala similarity index 97% rename from src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/TransactionBoundPlanContext.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/TransactionBoundPlanContext.scala index b381ce27..e05b3a3e 100644 --- a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/TransactionBoundPlanContext.scala +++ b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/TransactionBoundPlanContext.scala @@ -40,7 +40,7 @@ import org.neo4j.cypher.internal.v3_5.util.{CypherExecutionException, LabelId, P import scala.collection.JavaConverters._ object TransactionBoundPlanContext { - def apply(tc: TransactionalContextWrapper, logger: InternalNotificationLogger) = + def apply(tc: TransactionalContextWrapper, logger: InternalNotificationLogger): TransactionBoundPlanContext = new TransactionBoundPlanContext(tc, logger, InstrumentedGraphStatistics(TransactionBoundGraphStatistics(tc.dataRead, tc.schemaRead), new MutableGraphStatisticsSnapshot())) @@ -66,7 +66,7 @@ class TransactionBoundPlanContext(tc: TransactionalContextWrapper, logger: Inter override def indexGetForLabelAndProperties(labelName: String, propertyKeys: Seq[String]): Option[IndexDescriptor] = evalOrNone { try { val descriptor = toLabelSchemaDescriptor(this, labelName, propertyKeys) - getOnlineIndex(tc.schemaRead.index(descriptor.getLabelId, descriptor.getPropertyIds:_*)) + getOnlineIndex(tc.schemaRead.index(descriptor.getLabelId, descriptor.getPropertyIds: _*)) } catch { case _: KernelException => None } @@ -132,7 +132,8 @@ class TransactionBoundPlanContext(tc: TransactionalContextWrapper, logger: Inter case _: types.GeometryType | _: types.PointType => ValueCategory.GEOMETRY - case _: types.DateTimeType | _: types.LocalDateTimeType | _: types.DateType | _: types.TimeType | _: types.LocalTimeType | _: types.DurationType => + case _: types.DateTimeType | _: types.LocalDateTimeType | _: 
types.DateType | _: types.TimeType | + _: types.LocalTimeType | _: types.DurationType => ValueCategory.TEMPORAL // For everything else, we don't know @@ -167,7 +168,7 @@ class TransactionBoundPlanContext(tc: TransactionalContextWrapper, logger: Inter override val txIdProvider = LastCommittedTxIdProvider(tc.graph) - override def procedureSignature(name: QualifiedName) = { + override def procedureSignature(name: QualifiedName): ProcedureSignature = { val kn = new procs.QualifiedName(name.namespace.asJava, name.name) val procedures = tc.kernelTransaction.procedures() val handle = procedures.procedureGet(kn) diff --git a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/coerce.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/coerce.scala similarity index 100% rename from src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/coerce.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/coerce.scala diff --git a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/convert/CommunityExpressionConverter.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/convert/CommunityExpressionConverter.scala similarity index 95% rename from src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/convert/CommunityExpressionConverter.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/convert/CommunityExpressionConverter.scala index 35ea318c..ef0ff944 100644 --- a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/convert/CommunityExpressionConverter.scala +++ b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/convert/CommunityExpressionConverter.scala @@ -50,7 +50,7 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr override def toCommandProjection(id: Id, projections: Map[String, Expression], self: ExpressionConverters): Option[CommandProjection] = { - val projected = for ((k,Some(v)) <- projections.mapValues(e => toCommandExpression(id, e, self))) yield (k,v) + val projected = for ((k, Some(v)) <- projections.mapValues(e => toCommandExpression(id, e, self ))) yield (k, v) if (projected.size < projections.size) None else Some(InterpretedCommandProjection(projected)) } @@ -68,8 +68,8 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr case e: ast.Or => predicates.Or(self.toCommandPredicate(id, e.lhs), self.toCommandPredicate(id, e.rhs)) case e: ast.Xor => predicates.Xor(self.toCommandPredicate(id, e.lhs), self.toCommandPredicate(id, e.rhs)) case e: ast.And => predicates.And(self.toCommandPredicate(id, e.lhs), self.toCommandPredicate(id, e.rhs)) - case e: ast.Ands => predicates.Ands(NonEmptyList.from(e.exprs.map(self.toCommandPredicate(id,_)))) - case e: ast.Ors => predicates.Ors(NonEmptyList.from(e.exprs.map(self.toCommandPredicate(id,_)))) + case e: ast.Ands => predicates.Ands(NonEmptyList.from(e.exprs.map(self.toCommandPredicate(id, _)))) + case e: ast.Ors => predicates.Ors(NonEmptyList.from(e.exprs.map(self.toCommandPredicate(id, _)))) case e: ast.Not => predicates.Not(self.toCommandPredicate(id, e.rhs)) case e: ast.Equals => predicates.Equals(self.toCommandExpression(id, e.lhs), self.toCommandExpression(id, e.rhs)) case e: ast.NotEquals => predicates @@ -124,17 +124,17 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr 
self.toCommandExpression(id, e.scope.extractExpression.get)) case e: ast.ListComprehension => listComprehension(id, e, self) case e: ast.AllIterablePredicate => commands.AllInList(self.toCommandExpression(id, e.expression), e.variable.name, - e.innerPredicate.map(self.toCommandPredicate(id,_)) + e.innerPredicate.map(self.toCommandPredicate(id, _)) .getOrElse(predicates.True())) case e: ast.AnyIterablePredicate => commands.AnyInList(self.toCommandExpression(id, e.expression), e.variable.name, - e.innerPredicate.map(self.toCommandPredicate(id,_)) + e.innerPredicate.map(self.toCommandPredicate(id, _)) .getOrElse(predicates.True())) case e: ast.NoneIterablePredicate => commands.NoneInList(self.toCommandExpression(id, e.expression), e.variable.name, - e.innerPredicate.map(self.toCommandPredicate(id,_)) + e.innerPredicate.map(self.toCommandPredicate(id, _)) .getOrElse(predicates.True())) case e: ast.SingleIterablePredicate => commands .SingleInList(self.toCommandExpression(id, e.expression), e.variable.name, - e.innerPredicate.map(self.toCommandPredicate(id,_)).getOrElse(predicates.True())) + e.innerPredicate.map(self.toCommandPredicate(id, _)).getOrElse(predicates.True())) case e: ast.ReduceExpression => commandexpressions .ReduceFunction(self.toCommandExpression(id, e.list), e.variable.name, self.toCommandExpression(id, e.expression), e.accumulator.name, self.toCommandExpression(id, e.init)) @@ -143,9 +143,9 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr .NestedPipeExpression(e.pipe, self.toCommandExpression(id, e.projection)) case e: ast.GetDegree => getDegree(id, e, self) case e: PrefixSeekRangeWrapper => commandexpressions - .PrefixSeekRangeExpression(e.range.map(self.toCommandExpression(id,_))) - case e: InequalitySeekRangeWrapper => InequalitySeekRangeExpression(e.range.mapBounds(self.toCommandExpression(id,_))) - case e: PointDistanceSeekRangeWrapper => PointDistanceSeekRangeExpression(e.range.map(self.toCommandExpression(id,_))) + .PrefixSeekRangeExpression(e.range.map(self.toCommandExpression(id, _))) + case e: InequalitySeekRangeWrapper => InequalitySeekRangeExpression(e.range.mapBounds(self.toCommandExpression(id, _))) + case e: PointDistanceSeekRangeWrapper => PointDistanceSeekRangeExpression(e.range.map(self.toCommandExpression(id, _))) case e: ast.AndedPropertyInequalities => predicates .AndedPropertyComparablePredicates(variable(e.variable), toCommandProperty(id, e.property, self), e.inequalities.map(e => inequalityExpression(id, e, self))) @@ -154,7 +154,7 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr case e: ResolvedFunctionInvocation => val callArgumentCommands = e.callArguments.map(Some(_)) .zipAll(e.fcnSignature.get.inputSignature.map(_.default.map(_.value)), None, None).map { - case (given, default) => given.map(self.toCommandExpression(id,_)) + case (given, default) => given.map(self.toCommandExpression(id, _)) .getOrElse(commandexpressions.Literal(default.get)) } val signature = e.fcnSignature.get @@ -187,28 +187,34 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr case Avg => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Avg(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Ceil => commandexpressions.CeilFunction(self.toCommandExpression(id, invocation.arguments.head)) case Coalesce => 
commandexpressions.CoalesceFunction(toCommandExpression(id, invocation.arguments, self): _*) case Collect => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Collect(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Cos => commandexpressions.CosFunction(self.toCommandExpression(id, invocation.arguments.head)) case Cot => commandexpressions.CotFunction(self.toCommandExpression(id, invocation.arguments.head)) case Count => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Count(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Degrees => commandexpressions.DegreesFunction(self.toCommandExpression(id, invocation.arguments.head)) case E => commandexpressions.EFunction() case EndNode => commandexpressions @@ -257,36 +263,44 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr case Max => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Max(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Min => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Min(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Nodes => commandexpressions.NodesFunction(self.toCommandExpression(id, invocation.arguments.head)) case PercentileCont => val firstArg = self.toCommandExpression(id, invocation.arguments.head) val secondArg = self.toCommandExpression(id, invocation.arguments(1)) val command = commandexpressions.PercentileCont(firstArg, secondArg) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, firstArg) - else + } + else { command + } case PercentileDisc => val firstArg = self.toCommandExpression(id, invocation.arguments.head) val secondArg = self.toCommandExpression(id, invocation.arguments(1)) val command = commandexpressions.PercentileDisc(firstArg, secondArg) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, firstArg) - else + } + else { command + } case Pi => commandexpressions.PiFunction() case Distance => val firstArg = self.toCommandExpression(id, invocation.arguments.head) @@ -330,17 +344,21 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr case StdDev => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Stdev(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case StdDevP => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.StdevP(inner) - if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Substring => commandexpressions.SubstringFunction( self.toCommandExpression(id, invocation.arguments.head), @@ -350,10 +368,12 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr case Sum => val inner = self.toCommandExpression(id, invocation.arguments.head) val command = commandexpressions.Sum(inner) 
- if (invocation.distinct) + if (invocation.distinct) { commandexpressions.Distinct(command, inner) - else + } + else { command + } case Tail => commandexpressions.ListSlice( self.toCommandExpression(id, invocation.arguments.head), @@ -379,11 +399,11 @@ case class CommunityExpressionConverter(tokenContext: TokenContext) extends Expr private def toCommandExpression(id: Id, expression: Option[ast.Expression], self: ExpressionConverters): Option[CommandExpression] = - expression.map(self.toCommandExpression(id,_)) + expression.map(self.toCommandExpression(id, _)) private def toCommandExpression(id: Id, expressions: Seq[ast.Expression], self: ExpressionConverters): Seq[CommandExpression] = - expressions.map(self.toCommandExpression(id,_)) + expressions.map(self.toCommandExpression(id, _)) private def variable(e: ast.LogicalVariable) = commands.expressions.Variable(e.name) diff --git a/src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/expressions/cmd_blob.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/expressions/cmd_blob.scala similarity index 100% rename from src/blob/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/expressions/cmd_blob.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/commands/expressions/cmd_blob.scala diff --git a/src/blob/scala/org/neo4j/cypher/internal/v3_5/ast/semantics/SemanticExpressionCheck.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/ast/semantics/SemanticExpressionCheck.scala similarity index 100% rename from src/blob/scala/org/neo4j/cypher/internal/v3_5/ast/semantics/SemanticExpressionCheck.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/ast/semantics/SemanticExpressionCheck.scala diff --git a/src/blob/scala/org/neo4j/cypher/internal/v3_5/expressions/ast_blob.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/expressions/ast_blob.scala similarity index 98% rename from src/blob/scala/org/neo4j/cypher/internal/v3_5/expressions/ast_blob.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/expressions/ast_blob.scala index abf6ef28..32490e37 100644 --- a/src/blob/scala/org/neo4j/cypher/internal/v3_5/expressions/ast_blob.scala +++ b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/expressions/ast_blob.scala @@ -19,7 +19,7 @@ package org.neo4j.cypher.internal.v3_5.expressions import java.io.File -import cn.graiph.blob.Blob +import cn.pandadb.blob.Blob import org.apache.commons.codec.binary.Base64 import org.neo4j.cypher.internal.v3_5.util.InputPosition import org.neo4j.cypher.internal.v3_5.{expressions => ast} diff --git a/src/blob/scala/org/neo4j/cypher/internal/v3_5/parser/Expressions.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/parser/Expressions.scala similarity index 99% rename from src/blob/scala/org/neo4j/cypher/internal/v3_5/parser/Expressions.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/parser/Expressions.scala index 39434736..8a06a76b 100644 --- a/src/blob/scala/org/neo4j/cypher/internal/v3_5/parser/Expressions.scala +++ b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/parser/Expressions.scala @@ -16,7 +16,7 @@ */ package org.neo4j.cypher.internal.v3_5.parser -import cn.graiph.db._ +import cn.pandadb.cypherplus._ import org.neo4j.cypher.internal.v3_5.expressions.{Variable, _} import org.neo4j.cypher.internal.v3_5.util.InputPosition import org.neo4j.cypher.internal.v3_5.expressions diff 
--git a/src/blob/scala/org/neo4j/cypher/internal/v3_5/util/symbols/BlobType.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/util/symbols/BlobType.scala similarity index 100% rename from src/blob/scala/org/neo4j/cypher/internal/v3_5/util/symbols/BlobType.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/util/symbols/BlobType.scala diff --git a/src/blob/scala/org/neo4j/cypher/internal/v3_5/util/symbols/TypeSpec.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/util/symbols/TypeSpec.scala similarity index 100% rename from src/blob/scala/org/neo4j/cypher/internal/v3_5/util/symbols/TypeSpec.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/util/symbols/TypeSpec.scala diff --git a/src/blob/scala/org/neo4j/cypher/internal/v3_5/util/symbols/package.scala b/blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/util/symbols/package.scala similarity index 100% rename from src/blob/scala/org/neo4j/cypher/internal/v3_5/util/symbols/package.scala rename to blob-feature/src/main/scala/org/neo4j/cypher/internal/v3_5/util/symbols/package.scala diff --git a/src/blob/scala/org/neo4j/kernel/impl/blob/DefaultBlobFunctions.scala b/blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/DefaultBlobFunctions.scala similarity index 97% rename from src/blob/scala/org/neo4j/kernel/impl/blob/DefaultBlobFunctions.scala rename to blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/DefaultBlobFunctions.scala index ed57a578..b62a17d7 100644 --- a/src/blob/scala/org/neo4j/kernel/impl/blob/DefaultBlobFunctions.scala +++ b/blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/DefaultBlobFunctions.scala @@ -21,7 +21,8 @@ package org.neo4j.kernel.impl.blob import java.io.{File, FileInputStream} -import cn.graiph.blob.{MimeType, Blob} +import cn.pandadb.blob.{MimeType, Blob} +import cn.pandadb.util.PandaException import org.apache.commons.io.IOUtils import org.neo4j.procedure.{Description, Name, UserFunction} @@ -186,6 +187,6 @@ class DefaultBlobFunctions { } } -class CypherFunctionException(msg: String) extends RuntimeException(msg) { +class CypherFunctionException(msg: String) extends PandaException(msg) { } \ No newline at end of file diff --git a/src/blob/scala/org/neo4j/kernel/impl/blob/StoreBlobIO.scala b/blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/StoreBlobIO.scala similarity index 58% rename from src/blob/scala/org/neo4j/kernel/impl/blob/StoreBlobIO.scala rename to blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/StoreBlobIO.scala index 370617ad..0611f1b9 100644 --- a/src/blob/scala/org/neo4j/kernel/impl/blob/StoreBlobIO.scala +++ b/blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/StoreBlobIO.scala @@ -22,8 +22,8 @@ package org.neo4j.kernel.impl.blob import java.io.InputStream import java.nio.ByteBuffer -import cn.graiph.blob._ -import cn.graiph.util.{StreamUtils, ContextMap, Logging} +import cn.pandadb.blob._ +import cn.pandadb.util.{PandaException, StreamUtils, ContextMap, Logging} import org.neo4j.kernel.impl.store.record.{PrimitiveRecord, PropertyBlock, PropertyRecord} import org.neo4j.values.storable.{BlobArray, BlobValue} @@ -31,52 +31,51 @@ import org.neo4j.values.storable.{BlobArray, BlobValue} * Created by bluejoe on 2019/3/29. 
*/ object StoreBlobIO extends Logging { - def saveAndEncodeBlobAsByteArray(ic: ContextMap, blob: Blob): Array[Byte] = { - val bid = ic.get[BlobStorage].save(blob); + + def saveAndEncodeBlobAsByteArray(blob: Blob): Array[Byte] = { + val bid = BlobStorageContext.blobStorage.save(blob); BlobIO.pack(Blob.makeEntry(bid, blob)); } - def saveBlob(ic: ContextMap, blob: Blob, keyId: Int, block: PropertyBlock) = { - val bid = ic.get[BlobStorage].save(blob); + def saveBlob(blob: Blob, keyId: Int, block: PropertyBlock) { + val bid = BlobStorageContext.blobStorage.save(blob); block.setValueBlocks(BlobIO._pack(Blob.makeEntry(bid, blob), keyId)); } - def deleteBlobArrayProperty(ic: ContextMap, blobs: BlobArray): Unit = { - ic.get[BlobStorage].deleteBatch( + def deleteBlobArrayProperty(blobs: BlobArray): Unit = { + BlobStorageContext.blobStorage.deleteBatch( blobs.value().map(_.asInstanceOf[BlobWithId].id)); } - def deleteBlobProperty(ic: ContextMap, primitive: PrimitiveRecord, propRecord: PropertyRecord, block: PropertyBlock): Unit = { + def deleteBlobProperty(primitive: PrimitiveRecord, propRecord: PropertyRecord, block: PropertyBlock): Unit = { val entry = BlobIO.unpack(block.getValueBlocks); - ic.get[BlobStorage].delete(entry.id); + BlobStorageContext.blobStorage.delete(entry.id); } - def readBlob(ic: ContextMap, bytes: Array[Byte]): Blob = { - readBlobValue(ic, StreamUtils.convertByteArray2LongArray(bytes)).blob; + def readBlob(bytes: Array[Byte]): Blob = { + readBlobValue(StreamUtils.convertByteArray2LongArray(bytes)).blob; } - def readBlobArray(ic: ContextMap, dataBuffer: ByteBuffer, arrayLength: Int): Array[Blob] = { + def readBlobArray(dataBuffer: ByteBuffer, arrayLength: Int): Array[Blob] = { (0 to arrayLength - 1).map { x => val byteLength = dataBuffer.getInt(); val blobByteArray = new Array[Byte](byteLength); dataBuffer.get(blobByteArray); - StoreBlobIO.readBlob(ic, blobByteArray); + StoreBlobIO.readBlob(blobByteArray); }.toArray } - - def readBlobValue(ic: ContextMap, block: PropertyBlock): BlobValue = { - readBlobValue(ic, block.getValueBlocks); + def readBlobValue(block: PropertyBlock): BlobValue = { + readBlobValue(block.getValueBlocks); } - def readBlobValue(ic: ContextMap, values: Array[Long]): BlobValue = { + def readBlobValue(values: Array[Long]): BlobValue = { val entry = BlobIO.unpack(values); - val storage = ic.get[BlobStorage]; val blob = Blob.makeStoredBlob(entry, new InputStreamSource { override def offerStream[T](consume: (InputStream) => T): T = { val bid = entry.id; - storage.load(bid).getOrElse(throw new BlobNotExistException(bid)).offerStream(consume) + BlobStorageContext.blobStorage.load(bid).getOrElse(throw new BlobNotExistException(bid)).offerStream(consume) } }); @@ -84,6 +83,6 @@ object StoreBlobIO extends Logging { } } -class BlobNotExistException(bid: BlobId) extends RuntimeException { +class BlobNotExistException(bid: BlobId) extends PandaException(s"blob does not exist: $bid") { } \ No newline at end of file diff --git a/src/blob/scala/org/neo4j/kernel/impl/blob/storage.scala b/blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/storage.scala similarity index 83% rename from src/blob/scala/org/neo4j/kernel/impl/blob/storage.scala rename to blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/storage.scala index 082f6c67..c8736004 100644 --- a/src/blob/scala/org/neo4j/kernel/impl/blob/storage.scala +++ b/blob-feature/src/main/scala/org/neo4j/kernel/impl/blob/storage.scala @@ -22,12 +22,12 @@ package org.neo4j.kernel.impl.blob import java.io.{File, 
FileInputStream, FileOutputStream, InputStream} import java.util.UUID -import cn.graiph.blob.{MimeType, BlobId, Blob, InputStreamSource} -import cn.graiph.util._ +import cn.pandadb.blob._ +import cn.pandadb.util.StreamUtils._ +import cn.pandadb.util._ import org.apache.commons.io.filefilter.TrueFileFilter import org.apache.commons.io.{FileUtils, IOUtils} -import StreamUtils._ -import ConfigUtils._ + import scala.collection.JavaConversions._ trait BlobStorage extends BatchBlobValueStorage { @@ -38,7 +38,7 @@ trait BlobStorage extends BatchBlobValueStorage { def delete(id: BlobId): Unit; } -trait BatchBlobValueStorage extends Closable { +trait BatchBlobValueStorage extends ClosableModuleComponent { def saveBatch(blobs: Iterable[Blob]): Iterable[BlobId]; def loadBatch(ids: Iterable[BlobId]): Iterable[Option[Blob]]; @@ -48,14 +48,8 @@ trait BatchBlobValueStorage extends Closable { def iterator(): Iterator[(BlobId, Blob)]; } -trait Closable { - def initialize(storeDir: File, conf: Configuration): Unit; - - def disconnect(): Unit; -} - object BlobStorage extends Logging { - def of(bbvs: BatchBlobValueStorage) = { + def of(bbvs: BatchBlobValueStorage): BlobStorage = { logger.info(s"using batch blob storage: ${bbvs}"); new BlobStorage { @@ -83,11 +77,11 @@ object BlobStorage extends Logging { override def loadBatch(ids: Iterable[BlobId]): Iterable[Option[Blob]] = bbvs.loadBatch(ids) - override def disconnect(): Unit = bbvs.disconnect() + override def iterator(): Iterator[(BlobId, Blob)] = bbvs.iterator(); - override def initialize(storeDir: File, conf: Configuration): Unit = bbvs.initialize(storeDir, conf) + override def start(ctx: PandaModuleContext): Unit = bbvs.start(ctx) - override def iterator(): Iterator[(BlobId, Blob)] = bbvs.iterator(); + override def close(ctx: PandaModuleContext): Unit = bbvs.close(ctx) }; } @@ -102,7 +96,7 @@ object BlobStorage extends Logging { class DefaultLocalFileSystemBlobValueStorage extends BatchBlobValueStorage with Logging { var _rootDir: File = _; - override def saveBatch(blobs: Iterable[Blob]) = { + override def saveBatch(blobs: Iterable[Blob]): Iterable[BlobId] = { blobs.map(blob => { val bid = generateId(); val file = locateFile(bid); @@ -127,7 +121,7 @@ object BlobStorage extends Logging { ids.map(id => Some(readFromBlobFile(locateFile(id))._2)); } - override def deleteBatch(ids: Iterable[BlobId]) = { + override def deleteBatch(ids: Iterable[BlobId]) { ids.foreach { id => locateFile(id).delete() } @@ -164,14 +158,16 @@ object BlobStorage extends Logging { (blobId, blob); } - override def initialize(storeDir: File, conf: Configuration): Unit = { - val baseDir: File = storeDir; //new File(conf.getRaw("unsupported.dbms.directories.neo4j_home").get()); - _rootDir = conf.getAsFile("blob.storage.file.dir", baseDir, new File(baseDir, "/blob")); + override def start(ctx: PandaModuleContext): Unit = { + //val baseDir: File = new File(ctx.storeDir, ctx.neo4jConf.getValue("dbms.active_database").get().toString); + //new File(conf.getRaw("unsupported.dbms.directories.neo4j_home").get()); + _rootDir = BlobStorageContext.blobStorageDir; _rootDir.mkdirs(); logger.info(s"using storage dir: ${_rootDir.getCanonicalPath}"); } - override def disconnect(): Unit = { + override def close(ctx: PandaModuleContext): Unit = { + } override def iterator(): Iterator[(BlobId, Blob)] = { @@ -183,7 +179,7 @@ object BlobStorage extends Logging { def createDefault(): BatchBlobValueStorage = { //will read "default-blob-value-storage-class" entry first - 
GlobalContext.getOption("default-blob-value-storage-class") + BlobStorageContext.getDefaultBlobValueStorageClass .map(Class.forName(_).newInstance().asInstanceOf[BatchBlobValueStorage]) .getOrElse(new DefaultLocalFileSystemBlobValueStorage()) } diff --git a/src/blob/scala/org/neo4j/values/storable/BlobValue.scala b/blob-feature/src/main/scala/org/neo4j/values/storable/BlobValue.scala similarity index 93% rename from src/blob/scala/org/neo4j/values/storable/BlobValue.scala rename to blob-feature/src/main/scala/org/neo4j/values/storable/BlobValue.scala index 3a061d57..e0bff42a 100644 --- a/src/blob/scala/org/neo4j/values/storable/BlobValue.scala +++ b/blob-feature/src/main/scala/org/neo4j/values/storable/BlobValue.scala @@ -20,13 +20,14 @@ package org.neo4j.values.storable -import cn.graiph.blob.Blob +import cn.pandadb.blob.Blob import org.neo4j.hashing.HashFunction import org.neo4j.values.{AnyValue, ValueMapper} /** * Created by bluejoe on 2018/12/12. */ +//noinspection ScalaStyle case class BlobValue(val blob: Blob) extends ScalarValue { override def unsafeCompareTo(value: Value): Int = blob.length.compareTo(value.asInstanceOf[BlobValue].blob.length) @@ -71,9 +72,9 @@ class BlobArraySupport[X <: BlobArraySupport[X]](val blobs: Array[Blob]) { writer.endArray() } - def unsafeCompareTo(other: Value): Int = if (_equals(other)) 0 else -1; + def unsafeCompareTo(other: Value): Int = if (internalEquals(other)) 0 else -1; - def _equals(other: Value): Boolean = { + def internalEquals(other: Value): Boolean = { other.isInstanceOf[X] && other.asInstanceOf[X].blobs.zip(blobs).map(t => t._1 == t._2).reduce(_ && _) } diff --git a/commons/pom.xml b/commons/pom.xml new file mode 100644 index 00000000..0a1ddf85 --- /dev/null +++ b/commons/pom.xml @@ -0,0 +1,23 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + commons + + + + junit + junit + + + + \ No newline at end of file diff --git a/src/blob/resources/mime.properties b/commons/src/main/resources/mime.properties similarity index 100% rename from src/blob/resources/mime.properties rename to commons/src/main/resources/mime.properties diff --git a/src/graiph-database/scala/cn/graiph/PropertyExtractor.scala b/commons/src/main/scala/cn/pandadb/cypherplus/PropertyExtractor.scala similarity index 75% rename from src/graiph-database/scala/cn/graiph/PropertyExtractor.scala rename to commons/src/main/scala/cn/pandadb/cypherplus/PropertyExtractor.scala index 950014aa..a2990197 100644 --- a/src/graiph-database/scala/cn/graiph/PropertyExtractor.scala +++ b/commons/src/main/scala/cn/pandadb/cypherplus/PropertyExtractor.scala @@ -1,6 +1,6 @@ -package cn.graiph +package cn.pandadb.cypherplus -import cn.graiph.util.Configuration +import cn.pandadb.util.{Configuration} /** * Created by bluejoe on 2019/7/22. diff --git a/commons/src/main/scala/cn/pandadb/cypherplus/comparators.scala b/commons/src/main/scala/cn/pandadb/cypherplus/comparators.scala new file mode 100644 index 00000000..a2f5d5f8 --- /dev/null +++ b/commons/src/main/scala/cn/pandadb/cypherplus/comparators.scala @@ -0,0 +1,40 @@ +package cn.pandadb.cypherplus + +import cn.pandadb.util.{Configuration} + +trait AnyComparator { + def initialize(conf: Configuration); +} + +trait ValueComparator extends AnyComparator { + def compare(a: Any, b: Any): Double; +} + +trait SetComparator extends AnyComparator { + def compareAsSets(a: Any, b: Any): Array[Array[Double]]; +} + +/** + * Created by bluejoe on 2019/1/31. 
+ */ +trait CustomPropertyProvider { + def getCustomProperty(x: Any, propertyName: String): Option[Any]; +} + +trait ValueMatcher { + def like(a: Any, b: Any, algoName: Option[String], threshold: Option[Double]): Option[Boolean]; + + def containsOne(a: Any, b: Any, algoName: Option[String], threshold: Option[Double]): Option[Boolean]; + + def containsSet(a: Any, b: Any, algoName: Option[String], threshold: Option[Double]): Option[Boolean]; + + /** + * compares two values + */ + def compareOne(a: Any, b: Any, algoName: Option[String]): Option[Double]; + + /** + * compares two objects as sets + */ + def compareSet(a: Any, b: Any, algoName: Option[String]): Option[Array[Array[Double]]]; +} diff --git a/commons/src/main/scala/cn/pandadb/cypherplus/utils/CypherPlusUtils.scala b/commons/src/main/scala/cn/pandadb/cypherplus/utils/CypherPlusUtils.scala new file mode 100644 index 00000000..f4eb8fa1 --- /dev/null +++ b/commons/src/main/scala/cn/pandadb/cypherplus/utils/CypherPlusUtils.scala @@ -0,0 +1,24 @@ +package cn.pandadb.cypherplus.utils + +import java.util.Locale + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 14:50 2019/11/27 + * @Modified By: + */ +object CypherPlusUtils { + + def isWriteStatement(cypherStr: String): Boolean = { + val lowerCypher = cypherStr.toLowerCase(Locale.ROOT) + if (lowerCypher.contains("explain")) { + false + } else if (lowerCypher.contains("create") || lowerCypher.contains("merge") || + lowerCypher.contains("set") || lowerCypher.contains("delete")) { + true + } else { + false + } + } +} diff --git a/src/externel-properties/scala/cn/graiph/util/Configuration.scala b/commons/src/main/scala/cn/pandadb/util/Configuration.scala similarity index 72% rename from src/externel-properties/scala/cn/graiph/util/Configuration.scala rename to commons/src/main/scala/cn/pandadb/util/Configuration.scala index 049e7545..c56bca0c 100644 --- a/src/externel-properties/scala/cn/graiph/util/Configuration.scala +++ b/commons/src/main/scala/cn/pandadb/util/Configuration.scala @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -package cn.graiph.util +package cn.pandadb.util import java.io.File @@ -32,7 +32,7 @@ trait Configuration { /** * Created by bluejoe on 2018/11/3. 
*/ -class ConfigurationEx(conf: Configuration) extends Logging { +class ConfigurationOps(conf: Configuration) extends Logging { def getRequiredValueAsString(key: String): String = { getRequiredValue(key, (x) => x); } @@ -68,25 +68,27 @@ class ConfigurationEx(conf: Configuration) extends Logging { } } - def getValueAsString(key: String, defaultValue: String) = + def getValueAsString(key: String, defaultValue: String): String = getValueWithDefault(key, () => defaultValue, (x: String) => x) - def getValueAsClass(key: String, defaultValue: Class[_]) = + def getValueAsClass(key: String, defaultValue: Class[_]): Class[_] = getValueWithDefault(key, () => defaultValue, (x: String) => Class.forName(x)) - def getValueAsInt(key: String, defaultValue: Int) = + def getValueAsInt(key: String, defaultValue: Int): Int = getValueWithDefault[Int](key, () => defaultValue, (x: String) => x.toInt) - def getValueAsBoolean(key: String, defaultValue: Boolean) = + def getValueAsBoolean(key: String, defaultValue: Boolean): Boolean = getValueWithDefault[Boolean](key, () => defaultValue, (x: String) => x.toBoolean) - def getAsFile(key: String, baseDir: File, defaultValue: File) = { + def getAsFile(key: String, baseDir: File, defaultValue: File): File = { getValueWithDefault(key, () => defaultValue, { x => val file = new File(x); - if (file.isAbsolute) - file; - else - new File(baseDir, x); + if (file.isAbsolute) { + file + } + else { + new File(baseDir, x) + } }); } } @@ -102,5 +104,13 @@ class WrongArgumentException(key: String, value: String, clazz: Class[_]) extend } object ConfigUtils { - implicit def config2Ex(conf: Configuration) = new ConfigurationEx(conf); -} + implicit def configOps(conf: Configuration): ConfigurationOps = new ConfigurationOps(conf); + + implicit def mapOps(map: Map[String, String]): ConfigurationOps = new ConfigurationOps(map2Config(map)); + + implicit def contextMapOps(conf: ContextMap): ConfigurationOps = new ConfigurationOps(conf.toConfiguration); + + implicit def map2Config(map: Map[String, String]): Configuration = new Configuration() { + override def getRaw(name: String): Option[String] = map.get(name) + } +} \ No newline at end of file diff --git a/commons/src/main/scala/cn/pandadb/util/ContextMap.scala b/commons/src/main/scala/cn/pandadb/util/ContextMap.scala new file mode 100644 index 00000000..eb358062 --- /dev/null +++ b/commons/src/main/scala/cn/pandadb/util/ContextMap.scala @@ -0,0 +1,41 @@ +package cn.pandadb.util + +import scala.collection.Set +import scala.collection.mutable.{Map => MMap} + +class ContextMap { + private val _map = MMap[String, Any](); + + def keys: Set[String] = _map.keySet; + + protected def put[T](key: String, value: T): T = { + _map(key) = value + value + } + + protected def put[T](value: T)(implicit manifest: Manifest[T]): T = put[T](manifest.runtimeClass.getName, value) + + protected def get[T](key: String): T = { + _map(key).asInstanceOf[T] + } + + protected def getOption[T](key: String): Option[T] = _map.get(key).map(_.asInstanceOf[T]); + + protected def get[T]()(implicit manifest: Manifest[T]): T = get(manifest.runtimeClass.getName); + + protected def getOption[T]()(implicit manifest: Manifest[T]): Option[T] = getOption(manifest.runtimeClass.getName); + + def toConfiguration: Configuration = new Configuration() { + override def getRaw(name: String): Option[String] = getOption(name) + } +} + +object GlobalContext extends ContextMap { + def setLeaderNode(f: Boolean): Unit = super.put("isLeaderNode", f) + + def setWatchDog(f: Boolean): Unit = 
super.put("isWatchDog", f) + + def isWatchDog(): Boolean = super.getOption[Boolean]("isWatchDog").getOrElse(false) + + def isLeaderNode(): Boolean = super.getOption[Boolean]("isLeaderNode").getOrElse(false) +} \ No newline at end of file diff --git a/src/externel-properties/scala/cn/graiph/util/Logging.scala b/commons/src/main/scala/cn/pandadb/util/Logging.scala similarity index 89% rename from src/externel-properties/scala/cn/graiph/util/Logging.scala rename to commons/src/main/scala/cn/pandadb/util/Logging.scala index 5fdcf856..4ff3182e 100644 --- a/src/externel-properties/scala/cn/graiph/util/Logging.scala +++ b/commons/src/main/scala/cn/pandadb/util/Logging.scala @@ -1,4 +1,4 @@ -package cn.graiph.util +package cn.pandadb.util /** * Created by bluejoe on 2019/10/9. diff --git a/commons/src/main/scala/cn/pandadb/util/PandaException.scala b/commons/src/main/scala/cn/pandadb/util/PandaException.scala new file mode 100644 index 00000000..20abfd2d --- /dev/null +++ b/commons/src/main/scala/cn/pandadb/util/PandaException.scala @@ -0,0 +1,8 @@ +package cn.pandadb.util + +/** + * Created by bluejoe on 2019/11/22. + */ +class PandaException(msg: String, cause: Throwable = null) extends RuntimeException(msg, cause) { + +} diff --git a/src/externel-properties/scala/cn/graiph/util/ReflectUtils.scala b/commons/src/main/scala/cn/pandadb/util/ReflectUtils.scala similarity index 93% rename from src/externel-properties/scala/cn/graiph/util/ReflectUtils.scala rename to commons/src/main/scala/cn/pandadb/util/ReflectUtils.scala index 4bfa7ef8..e4c4bbfa 100644 --- a/src/externel-properties/scala/cn/graiph/util/ReflectUtils.scala +++ b/commons/src/main/scala/cn/pandadb/util/ReflectUtils.scala @@ -17,7 +17,7 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -package cn.graiph.util +package cn.pandadb.util import java.lang.reflect.Field @@ -39,7 +39,7 @@ object ReflectUtils { constructor.newInstance(args.map(_.asInstanceOf[Object]): _*).asInstanceOf[T]; } - def instanceOf(className: String)(args: Any*) = { + def instanceOf(className: String)(args: Any*): Any = { val constructor = Class.forName(className).getDeclaredConstructor(args.map(_.getClass): _*); constructor.setAccessible(true); constructor.newInstance(args.map(_.asInstanceOf[Object]): _*); @@ -71,8 +71,9 @@ class ReflectedObject(o: AnyRef) { catch { case e: NoSuchFieldException => val sc = clazz.getSuperclass; - if (sc == null) + if (sc == null) { throw e; + } _getField(sc, fieldName); } @@ -93,6 +94,6 @@ class ReflectedObject(o: AnyRef) { } class InvalidFieldPathException(o: AnyRef, path: String, cause: Throwable) - extends RuntimeException(s"invalid field path: $path, host: $o", cause) { + extends PandaException(s"invalid field path: $path, host: $o", cause) { } \ No newline at end of file diff --git a/src/externel-properties/scala/cn/graiph/util/StreamUtils.scala b/commons/src/main/scala/cn/pandadb/util/StreamUtils.scala similarity index 92% rename from src/externel-properties/scala/cn/graiph/util/StreamUtils.scala rename to commons/src/main/scala/cn/pandadb/util/StreamUtils.scala index e63fa966..d16df2ca 100644 --- a/src/externel-properties/scala/cn/graiph/util/StreamUtils.scala +++ b/commons/src/main/scala/cn/pandadb/util/StreamUtils.scala @@ -18,7 +18,7 @@ * along with this program. If not, see . 
*/ -package cn.graiph.util +package cn.pandadb.util import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStream, OutputStream} @@ -49,9 +49,9 @@ object StreamUtils { baos.readLong() } - implicit def inputStream2Ex(is: InputStream) = new InputStreamEx(is); + implicit def inputStream2Ex(is: InputStream): InputStreamEx = new InputStreamEx(is); - implicit def outputStream2Ex(os: OutputStream) = new OutputStreamEx(os); + implicit def outputStream2Ex(os: OutputStream): OutputStreamEx = new OutputStreamEx(os); } class InputStreamEx(is: InputStream) { @@ -76,8 +76,9 @@ class InputStreamEx(is: InputStream) { val bytes: Array[Byte] = new Array[Byte](n).map(x => 0.toByte); val nread = is.read(bytes); - if (nread != n) + if (nread != n) { throw new InsufficientBytesException(n, nread); + } bytes; } diff --git a/commons/src/main/scala/cn/pandadb/util/event.scala b/commons/src/main/scala/cn/pandadb/util/event.scala new file mode 100644 index 00000000..320598a3 --- /dev/null +++ b/commons/src/main/scala/cn/pandadb/util/event.scala @@ -0,0 +1,17 @@ +package cn.pandadb.util + +import scala.collection.mutable.ArrayBuffer + +trait PandaEvent { +} + +object PandaEventHub { + type PandaEventHandler = PartialFunction[PandaEvent, Unit]; + val handlers = ArrayBuffer[PandaEventHandler](); + + def trigger(handler: PandaEventHandler): Unit = handlers += handler; + + def publish(event: PandaEvent): Unit = { + handlers.filter(_.isDefinedAt(event)).foreach(_.apply(event)) + } +} \ No newline at end of file diff --git a/commons/src/main/scala/cn/pandadb/util/module.scala b/commons/src/main/scala/cn/pandadb/util/module.scala new file mode 100644 index 00000000..c2fee155 --- /dev/null +++ b/commons/src/main/scala/cn/pandadb/util/module.scala @@ -0,0 +1,36 @@ +package cn.pandadb.util + +import java.io.File + +import scala.collection.mutable.ArrayBuffer + +trait ClosableModuleComponent { + def start(ctx: PandaModuleContext): Unit + + def close(ctx: PandaModuleContext): Unit +} + +trait PandaModule extends ClosableModuleComponent { + def init(ctx: PandaModuleContext); +} + +case class PandaModuleContext(configuration: Configuration, storeDir: File, sharedContext: ContextMap) { +} + +class PandaModules extends Logging { + val modules = ArrayBuffer[PandaModule](); + + def add(module: PandaModule): PandaModules = { + modules += module + this + } + + def init(ctx: PandaModuleContext): Unit = modules.foreach { module => + module.init(ctx) + logger.info(s"initialized ${module.getClass.getSimpleName}") + } + + def start(ctx: PandaModuleContext): Unit = modules.foreach(_.start(ctx)) + + def close(ctx: PandaModuleContext): Unit = modules.foreach(_.close(ctx)) +} \ No newline at end of file diff --git a/commons/src/test/scala/CypherPlusUtilsTest.scala b/commons/src/test/scala/CypherPlusUtilsTest.scala new file mode 100644 index 00000000..9a4cd974 --- /dev/null +++ b/commons/src/test/scala/CypherPlusUtilsTest.scala @@ -0,0 +1,50 @@ +import cn.pandadb.cypherplus.utils.CypherPlusUtils +import org.junit.runners.MethodSorters +import org.junit.{Assert, FixMethodOrder, Test} + +import scala.collection.mutable.ListBuffer + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 17:19 2019/12/7 + * @Modified By: + */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +class CypherPlusUtilsTest { + var writeStatements: ListBuffer[String] = new ListBuffer[String] + var notWriteStatements: ListBuffer[String] = new ListBuffer[String] + + notWriteStatements.append("EXPlain Create(n:Test)") + notWriteStatements.append("Explain 
Match(n:T) Delete n;") + notWriteStatements.append("expLaIN Match(n) set n.name='panda'") + notWriteStatements.append("Match(n) \n with n \n Return n.name") + + writeStatements.append("Create(n:Test{prop:'prop'})") + writeStatements.append("Merge(n:T{name:'panda'})") + writeStatements.append("Match(n:Test) sET n.prop=123") + writeStatements.append("Match(n) Where n.prop=123 DeLETe n") + + @Test + def test1(): Unit = { + writeStatements.toList.foreach(statement => { + if (!CypherPlusUtils.isWriteStatement(statement)) { + // scalastyle:off + println(s"error: ${statement} judged as a not-write statement.") + } + Assert.assertEquals(true, CypherPlusUtils.isWriteStatement(statement)) + }) + } + + @Test + def test2(): Unit = { + notWriteStatements.toList.foreach(statement => { + if (CypherPlusUtils.isWriteStatement(statement)) { + // scalastyle:off + println(s"error: ${statement} judged as a write statement.") + } + Assert.assertEquals(false, CypherPlusUtils.isWriteStatement(statement)) + }) + } + +} diff --git a/testdata/cypher-plugins.xml b/cypher-plugins.xml similarity index 82% rename from testdata/cypher-plugins.xml rename to cypher-plugins.xml index f73a8f2b..d7f018f3 100644 --- a/testdata/cypher-plugins.xml +++ b/cypher-plugins.xml @@ -3,47 +3,47 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.0.xsd"> - + - + - + - + - + - + - + - + @@ -54,7 +54,7 @@ - + @@ -62,7 +62,7 @@ - + @@ -70,7 +70,7 @@ - + @@ -78,7 +78,7 @@ - + diff --git a/docs/codespec.md b/docs/codespec.md index 73cf05f0..c70693cc 100644 --- a/docs/codespec.md +++ b/docs/codespec.md @@ -13,6 +13,7 @@ ### Rule 3: Do not keep useless comments!!! > * wrong comments/validated comments will lead people to a wrong way! > * please write your comments in English! +> * pay attention to blank lines, remove unnecessary blank lines please. ### Rule 4: Use s"" to format string > * use: `s"hello, $name"`, do not use: `"hello, "+name` @@ -24,6 +25,7 @@ ### Rule 6: Write test cases > * write JUnit test cases, instead of write a Java program > * separate test source code with main source code +> * Use `Assert`, don't judge manually. ### Rule 7: NO hard coding > * NO magic numbers @@ -38,4 +40,4 @@ ### Rule 9: Please limit maximum line length -> * Limit all lines to a maximum of 79 characters. \ No newline at end of file +> * Limit all lines to a maximum of 79 characters. 
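As an illustration of the test conventions added above (JUnit tests that use explicit `Assert` calls instead of manual judging, and `s""` interpolation for messages), here is a minimal sketch. It is not part of the patch: the class name is hypothetical, and it assumes JUnit 4 on the test classpath and the `CypherPlusUtils.isWriteStatement` helper introduced earlier in this diff (a case-insensitive keyword scan that treats any `explain`-prefixed statement as read-only).

```scala
import cn.pandadb.cypherplus.utils.CypherPlusUtils
import org.junit.{Assert, Test}

// Hypothetical example test; mirrors the style required by the code spec above.
class IsWriteStatementSketchTest {

  @Test
  def explainedStatementsAreNeverWrites(): Unit = {
    val statement = "EXPLAIN CREATE (n:Test)"
    // Rule 6: assert the expectation instead of printing and judging manually.
    Assert.assertFalse(s"expected a read-only statement: $statement",
      CypherPlusUtils.isWriteStatement(statement))
  }

  @Test
  def mergeIsAWrite(): Unit = {
    val statement = "MERGE (n:T {name: 'panda'})"
    Assert.assertTrue(s"expected a write statement: $statement",
      CypherPlusUtils.isWriteStatement(statement))
  }
}
```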
diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 00000000..4daa62c7 Binary files /dev/null and b/docs/logo.png differ diff --git a/external-properties/pom.xml b/external-properties/pom.xml new file mode 100644 index 00000000..e5945a02 --- /dev/null +++ b/external-properties/pom.xml @@ -0,0 +1,73 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + external-properties + + + + cn.pandadb + commons + ${pandadb.version} + compile + + + cn.pandadb + network-commons + ${pandadb.version} + compile + + + cn.pandadb + neo4j-hacking + ${pandadb.version} + compile + + + org.apache.solr + solr-solrj + + + org.elasticsearch.client + elasticsearch-rest-high-level-client + 6.5.0 + + + + com.alibaba + fastjson + 1.2.62 + + + + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + scala-compile-first + process-resources + + add-source + compile + + + + + + + + \ No newline at end of file diff --git a/external-properties/src/main/java/org/neo4j/kernel/impl/api/KernelTransactionImplementation.java b/external-properties/src/main/java/org/neo4j/kernel/impl/api/KernelTransactionImplementation.java new file mode 100644 index 00000000..7c4e80c3 --- /dev/null +++ b/external-properties/src/main/java/org/neo4j/kernel/impl/api/KernelTransactionImplementation.java @@ -0,0 +1,1308 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +package org.neo4j.kernel.impl.api; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Function; +import java.util.stream.Stream; + +import org.neo4j.collection.pool.Pool; +import org.neo4j.graphdb.NotInTransactionException; +import org.neo4j.graphdb.TransactionTerminatedException; +import org.neo4j.internal.kernel.api.CursorFactory; +import org.neo4j.internal.kernel.api.ExecutionStatistics; +import org.neo4j.internal.kernel.api.ExplicitIndexRead; +import org.neo4j.internal.kernel.api.ExplicitIndexWrite; +import org.neo4j.internal.kernel.api.NodeCursor; +import org.neo4j.internal.kernel.api.PropertyCursor; +import org.neo4j.internal.kernel.api.Read; +import org.neo4j.internal.kernel.api.RelationshipScanCursor; +import org.neo4j.internal.kernel.api.SchemaRead; +import org.neo4j.internal.kernel.api.SchemaWrite; +import org.neo4j.internal.kernel.api.Token; +import org.neo4j.internal.kernel.api.TokenRead; +import org.neo4j.internal.kernel.api.TokenWrite; +import org.neo4j.internal.kernel.api.Write; +import org.neo4j.internal.kernel.api.exceptions.InvalidTransactionTypeKernelException; +import org.neo4j.internal.kernel.api.exceptions.TransactionFailureException; +import org.neo4j.internal.kernel.api.exceptions.schema.ConstraintValidationException; +import org.neo4j.internal.kernel.api.exceptions.schema.CreateConstraintFailureException; +import org.neo4j.internal.kernel.api.exceptions.schema.SchemaKernelException; +import org.neo4j.internal.kernel.api.schema.SchemaDescriptor; +import org.neo4j.internal.kernel.api.security.AccessMode; +import org.neo4j.internal.kernel.api.security.AuthSubject; +import org.neo4j.internal.kernel.api.security.SecurityContext; +import org.neo4j.io.pagecache.tracing.cursor.PageCursorTracer; +import org.neo4j.io.pagecache.tracing.cursor.PageCursorTracerSupplier; +import org.neo4j.io.pagecache.tracing.cursor.context.VersionContextSupplier; +import org.neo4j.kernel.api.KernelTransaction; +import org.neo4j.kernel.api.SilentTokenNameLookup; +import org.neo4j.kernel.api.exceptions.ConstraintViolationTransactionFailureException; +import org.neo4j.kernel.api.exceptions.Status; +import org.neo4j.kernel.api.explicitindex.AutoIndexing; +import org.neo4j.kernel.api.txstate.ExplicitIndexTransactionState; +import org.neo4j.kernel.api.txstate.TransactionState; +import org.neo4j.kernel.api.txstate.TxStateHolder; +import org.neo4j.kernel.api.txstate.auxiliary.AuxiliaryTransactionState; +import org.neo4j.kernel.api.txstate.auxiliary.AuxiliaryTransactionStateCloseException; +import org.neo4j.kernel.api.txstate.auxiliary.AuxiliaryTransactionStateHolder; +import org.neo4j.kernel.api.txstate.auxiliary.AuxiliaryTransactionStateManager; +import org.neo4j.kernel.configuration.Config; +import org.neo4j.kernel.impl.api.index.IndexingService; +import org.neo4j.kernel.impl.api.state.ConstraintIndexCreator; +import org.neo4j.kernel.impl.api.state.TxState; +import org.neo4j.kernel.impl.constraints.ConstraintSemantics; +import org.neo4j.kernel.impl.core.TokenHolders; +import org.neo4j.kernel.impl.factory.AccessCapability; +import org.neo4j.kernel.impl.index.ExplicitIndexStore; +import org.neo4j.kernel.impl.locking.ActiveLock; +import org.neo4j.kernel.impl.locking.Locks; +import 
org.neo4j.kernel.impl.locking.StatementLocks; +import org.neo4j.kernel.impl.newapi.AllStoreHolder; +import org.neo4j.kernel.impl.newapi.DefaultCursors; +import org.neo4j.kernel.impl.newapi.IndexTxStateUpdater; +import org.neo4j.kernel.impl.newapi.KernelToken; +import org.neo4j.kernel.impl.newapi.Operations; +import org.neo4j.kernel.impl.proc.Procedures; +import org.neo4j.kernel.impl.transaction.TransactionHeaderInformationFactory; +import org.neo4j.kernel.impl.transaction.TransactionMonitor; +import org.neo4j.kernel.impl.transaction.log.PhysicalTransactionRepresentation; +import org.neo4j.kernel.impl.transaction.tracing.CommitEvent; +import org.neo4j.kernel.impl.transaction.tracing.TransactionEvent; +import org.neo4j.kernel.impl.transaction.tracing.TransactionTracer; +import org.neo4j.kernel.impl.util.Dependencies; +import org.neo4j.kernel.impl.util.collection.CollectionsFactory; +import org.neo4j.kernel.impl.util.collection.CollectionsFactorySupplier; +import org.neo4j.resources.CpuClock; +import org.neo4j.resources.HeapAllocation; +import org.neo4j.storageengine.api.StorageCommand; +import org.neo4j.storageengine.api.StorageEngine; +import org.neo4j.storageengine.api.StorageReader; +import org.neo4j.storageengine.api.lock.LockTracer; +import org.neo4j.storageengine.api.schema.IndexDescriptor; +import org.neo4j.storageengine.api.txstate.TxStateVisitor; + +import static java.lang.String.format; +import static java.util.Collections.emptyMap; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.neo4j.storageengine.api.TransactionApplicationMode.INTERNAL; + +/** + * This class should replace the {@link org.neo4j.kernel.api.KernelTransaction} interface, and take its name, as soon + * as + * {@code TransitionalTxManagementKernelTransaction} is gone from {@code server}. + */ +public class KernelTransactionImplementation implements KernelTransaction, TxStateHolder, ExecutionStatistics +{ + /* + * IMPORTANT: + * This class is pooled and re-used. If you add *any* state to it, you *must* make sure that: + * - the #initialize() method resets that state for re-use + * - the #release() method releases resources acquired in #initialize() or during the transaction's life time + */ + + // default values for not committed tx id and tx commit time + private static final long NOT_COMMITTED_TRANSACTION_ID = -1; + private static final long NOT_COMMITTED_TRANSACTION_COMMIT_TIME = -1; + + private final CollectionsFactory collectionsFactory; + + // Logic + private final SchemaWriteGuard schemaWriteGuard; + private final TransactionHooks hooks; + private final ConstraintIndexCreator constraintIndexCreator; + private final StorageEngine storageEngine; + private final TransactionTracer transactionTracer; + private final Pool pool; + private final AuxiliaryTransactionStateManager auxTxStateManager; + + // For committing + private final TransactionHeaderInformationFactory headerInformationFactory; + private final TransactionCommitProcess commitProcess; + private final TransactionMonitor transactionMonitor; + private final PageCursorTracerSupplier cursorTracerSupplier; + private final VersionContextSupplier versionContextSupplier; + private final StorageReader storageReader; + private final ClockContext clocks; + private final AccessCapability accessCapability; + private final ConstraintSemantics constraintSemantics; + + // State that needs to be reset between uses. 
Most of these should be cleared or released in #release(), + // whereas others, such as timestamp or txId when transaction starts, even locks, needs to be set in #initialize(). + private TxState txState; + private AuxiliaryTransactionStateHolder auxTxStateHolder; + private volatile TransactionWriteState writeState; + private TransactionHooks.TransactionHooksState hooksState; + private final KernelStatement currentStatement; + private final List closeListeners = new ArrayList<>( 2 ); + private SecurityContext securityContext; + private volatile StatementLocks statementLocks; + private volatile long userTransactionId; + private boolean beforeHookInvoked; + private volatile boolean closing; + private volatile boolean closed; + private boolean failure; + private boolean success; + private volatile Status terminationReason; + private long startTimeMillis; + private long timeoutMillis; + private long lastTransactionIdWhenStarted; + private volatile long lastTransactionTimestampWhenStarted; + private final Statistics statistics; + private TransactionEvent transactionEvent; + private Type type; + private long transactionId; + private long commitTime; + private volatile int reuseCount; + private volatile Map userMetaData; + private final Operations operations; + + /** + * Lock prevents transaction {@link #markForTermination(Status)} transaction termination} from interfering with + * {@link #close() transaction commit} and specifically with {@link #release()}. + * Termination can run concurrently with commit and we need to make sure that it terminates the right lock client + * and the right transaction (with the right {@link #reuseCount}) because {@link KernelTransactionImplementation} + * instances are pooled. + */ + private final Lock terminationReleaseLock = new ReentrantLock(); + + public KernelTransactionImplementation( Config config, StatementOperationParts statementOperations, SchemaWriteGuard schemaWriteGuard, + TransactionHooks hooks, ConstraintIndexCreator constraintIndexCreator, Procedures procedures, + TransactionHeaderInformationFactory headerInformationFactory, TransactionCommitProcess commitProcess, TransactionMonitor transactionMonitor, + AuxiliaryTransactionStateManager auxTxStateManager, Pool pool, Clock clock, + AtomicReference cpuClockRef, AtomicReference heapAllocationRef, TransactionTracer transactionTracer, + LockTracer lockTracer, PageCursorTracerSupplier cursorTracerSupplier, StorageEngine storageEngine, AccessCapability accessCapability, + AutoIndexing autoIndexing, ExplicitIndexStore explicitIndexStore, VersionContextSupplier versionContextSupplier, + CollectionsFactorySupplier collectionsFactorySupplier, ConstraintSemantics constraintSemantics, SchemaState schemaState, + IndexingService indexingService, TokenHolders tokenHolders, Dependencies dataSourceDependencies ) + { + this.schemaWriteGuard = schemaWriteGuard; + this.hooks = hooks; + this.constraintIndexCreator = constraintIndexCreator; + this.headerInformationFactory = headerInformationFactory; + this.commitProcess = commitProcess; + this.transactionMonitor = transactionMonitor; + this.storageReader = storageEngine.newReader(); + this.storageEngine = storageEngine; + this.auxTxStateManager = auxTxStateManager; + this.pool = pool; + this.clocks = new ClockContext( clock ); + this.transactionTracer = transactionTracer; + this.cursorTracerSupplier = cursorTracerSupplier; + this.versionContextSupplier = versionContextSupplier; + this.currentStatement = new KernelStatement( this, this, storageReader, + lockTracer, 
statementOperations, this.clocks, + versionContextSupplier ); + this.accessCapability = accessCapability; + this.statistics = new Statistics( this, cpuClockRef, heapAllocationRef ); + this.userMetaData = emptyMap(); + this.constraintSemantics = constraintSemantics; + DefaultCursors cursors = new DefaultCursors( storageReader ); + AllStoreHolder allStoreHolder = + new AllStoreHolder( storageReader, this, cursors, explicitIndexStore, + procedures, schemaState, dataSourceDependencies ); + this.operations = + new Operations( + allStoreHolder, + new IndexTxStateUpdater( allStoreHolder, indexingService ), storageReader, + this, + new KernelToken( storageReader, this, tokenHolders ), + cursors, + autoIndexing, + constraintIndexCreator, + constraintSemantics, + indexingService, + config ); + this.collectionsFactory = collectionsFactorySupplier.create(); + } + + /** + * Reset this transaction to a vanilla state, turning it into a logically new transaction. + */ + public KernelTransactionImplementation initialize( long lastCommittedTx, long lastTimeStamp, StatementLocks statementLocks, Type type, + SecurityContext frozenSecurityContext, long transactionTimeout, long userTransactionId ) + { + this.type = type; + this.statementLocks = statementLocks; + this.userTransactionId = userTransactionId; + this.terminationReason = null; + this.closing = false; + this.closed = false; + this.beforeHookInvoked = false; + this.failure = false; + this.success = false; + this.writeState = TransactionWriteState.NONE; + this.startTimeMillis = clocks.systemClock().millis(); + this.timeoutMillis = transactionTimeout; + this.lastTransactionIdWhenStarted = lastCommittedTx; + this.lastTransactionTimestampWhenStarted = lastTimeStamp; + this.transactionEvent = transactionTracer.beginTransaction(); + assert transactionEvent != null : "transactionEvent was null!"; + this.securityContext = frozenSecurityContext; + this.transactionId = NOT_COMMITTED_TRANSACTION_ID; + this.commitTime = NOT_COMMITTED_TRANSACTION_COMMIT_TIME; + PageCursorTracer pageCursorTracer = cursorTracerSupplier.get(); + this.statistics.init( Thread.currentThread().getId(), pageCursorTracer ); + this.currentStatement.initialize( statementLocks, pageCursorTracer ); + this.operations.initialize(); + return this; + } + + int getReuseCount() + { + return reuseCount; + } + + @Override + public long startTime() + { + return startTimeMillis; + } + + @Override + public long timeout() + { + return timeoutMillis; + } + + @Override + public long lastTransactionIdWhenStarted() + { + return lastTransactionIdWhenStarted; + } + + @Override + public void success() + { + this.success = true; + } + + boolean isSuccess() + { + return success; + } + + @Override + public void failure() + { + failure = true; + } + + @Override + public Optional getReasonIfTerminated() + { + return Optional.ofNullable( terminationReason ); + } + + boolean markForTermination( long expectedReuseCount, Status reason ) + { + terminationReleaseLock.lock(); + try + { + return expectedReuseCount == reuseCount && markForTerminationIfPossible( reason ); + } + finally + { + terminationReleaseLock.unlock(); + } + } + + /** + * {@inheritDoc} + *

+ * This method is guarded by {@link #terminationReleaseLock} to coordinate concurrent + * {@link #close()} and {@link #release()} calls. + */ + @Override + public void markForTermination( Status reason ) + { + terminationReleaseLock.lock(); + try + { + markForTerminationIfPossible( reason ); + } + finally + { + terminationReleaseLock.unlock(); + } + } + + @Override + public boolean isSchemaTransaction() + { + return writeState == TransactionWriteState.SCHEMA; + } + + private boolean markForTerminationIfPossible( Status reason ) + { + if ( canBeTerminated() ) + { + failure = true; + terminationReason = reason; + if ( statementLocks != null ) + { + statementLocks.stop(); + } + transactionMonitor.transactionTerminated( hasTxStateWithChanges() ); + return true; + } + return false; + } + + @Override + public boolean isOpen() + { + return !closed && !closing; + } + + @Override + public SecurityContext securityContext() + { + if ( securityContext == null ) + { + throw new NotInTransactionException(); + } + return securityContext; + } + + @Override + public AuthSubject subjectOrAnonymous() + { + SecurityContext context = this.securityContext; + return context == null ? AuthSubject.ANONYMOUS : context.subject(); + } + + @Override + public void setMetaData( Map data ) + { + this.userMetaData = data; + } + + @Override + public Map getMetaData() + { + return userMetaData; + } + + @Override + public KernelStatement acquireStatement() + { + assertTransactionOpen(); + currentStatement.acquire(); + return currentStatement; + } + + @Override + public IndexDescriptor indexUniqueCreate( SchemaDescriptor schema, String provider ) throws SchemaKernelException + { + return operations.indexUniqueCreate( schema, provider ); + } + + @Override + public long pageHits() + { + return cursorTracerSupplier.get().hits(); + } + + @Override + public long pageFaults() + { + return cursorTracerSupplier.get().faults(); + } + + ExecutingQueryList executingQueries() + { + return currentStatement.executingQueryList(); + } + + void upgradeToDataWrites() throws InvalidTransactionTypeKernelException + { + writeState = writeState.upgradeToDataWrites(); + } + + void upgradeToSchemaWrites() throws InvalidTransactionTypeKernelException + { + schemaWriteGuard.assertSchemaWritesAllowed(); + writeState = writeState.upgradeToSchemaWrites(); + } + + private void dropCreatedConstraintIndexes() throws TransactionFailureException + { + if ( hasTxStateWithChanges() ) + { + for ( IndexDescriptor createdConstraintIndex : txState().constraintIndexesCreatedInTx() ) + { + // TODO logically, which statement should this operation be performed on? 
+ constraintIndexCreator.dropUniquenessConstraintIndex( createdConstraintIndex ); + } + } + } + + @Override + public TransactionState txState() + { + if ( txState == null ) + { + transactionMonitor.upgradeToWriteTransaction(); + txState = new TxState( collectionsFactory ); + } + return txState; + } + + private AuxiliaryTransactionStateHolder getAuxTxStateHolder() + { + if ( auxTxStateHolder == null ) + { + auxTxStateHolder = auxTxStateManager.openStateHolder(); + } + return auxTxStateHolder; + } + + @Override + public AuxiliaryTransactionState auxiliaryTxState( Object providerIdentityKey ) + { + return getAuxTxStateHolder().getState( providerIdentityKey ); + } + + @Override + public ExplicitIndexTransactionState explicitIndexTxState() + { + return (ExplicitIndexTransactionState) getAuxTxStateHolder().getState( ExplicitIndexTransactionStateProvider.PROVIDER_KEY ); + } + + @Override + public boolean hasTxStateWithChanges() + { + return txState != null && txState.hasChanges(); + } + + private void markAsClosed( long txId ) + { + assertTransactionOpen(); + closed = true; + notifyListeners( txId ); + closeCurrentStatementIfAny(); + } + + private void notifyListeners( long txId ) + { + for ( CloseListener closeListener : closeListeners ) + { + closeListener.notify( txId ); + } + } + + private void closeCurrentStatementIfAny() + { + currentStatement.forceClose(); + } + + private void assertTransactionNotClosing() + { + if ( closing ) + { + throw new IllegalStateException( "This transaction is already being closed." ); + } + } + + private void assertTransactionOpen() + { + if ( closed ) + { + throw new IllegalStateException( "This transaction has already been completed." ); + } + } + + @Override + public void assertOpen() + { + Status reason = this.terminationReason; + if ( reason != null ) + { + throw new TransactionTerminatedException( reason ); + } + if ( closed ) + { + throw new NotInTransactionException( "The transaction has been closed." ); + } + } + + private boolean hasChanges() + { + return hasTxStateWithChanges() || hasAuxTxStateChanges(); + } + + private boolean hasAuxTxStateChanges() + { + return auxTxStateHolder != null && getAuxTxStateHolder().hasChanges(); + } + + private boolean hasDataChanges() + { + return hasTxStateWithChanges() && txState.hasDataChanges(); + } + + @Override + public long closeTransaction() throws TransactionFailureException + { + assertTransactionOpen(); + assertTransactionNotClosing(); + closing = true; + try + { + if ( failure || !success || isTerminated() ) + { + // NOTE: pandadb + this.operations.customPropWriteTx().rollback(); + // END-NOTE + rollback(); + failOnNonExplicitRollbackIfNeeded(); + return ROLLBACK; + } + else + { + return commit(); + } + } + finally + { + try + { + // NOTE: pandadb + this.operations.customPropWriteTx().close(); + // END-NOTE + closed = true; + closing = false; + transactionEvent.setSuccess( success ); + transactionEvent.setFailure( failure ); + transactionEvent.setTransactionWriteState( writeState.name() ); + transactionEvent.setReadOnly( txState == null || !txState.hasChanges() ); + transactionEvent.close(); + } + finally + { + release(); + } + } + } + + public boolean isClosing() + { + return closing; + } + + /** + * Throws exception if this transaction was marked as successful but failure flag has also been set to true. + *

+ * This could happen when: + * <ul> + * <li>caller explicitly calls both {@link #success()} and {@link #failure()}</li> + * <li>caller explicitly calls {@link #success()} but transaction execution fails</li> + * <li>caller explicitly calls {@link #success()} but transaction is terminated</li> + * </ul>
+ *

+ * + * @throws TransactionFailureException when execution failed + * @throws TransactionTerminatedException when transaction was terminated + */ + private void failOnNonExplicitRollbackIfNeeded() throws TransactionFailureException + { + if ( success && isTerminated() ) + { + throw new TransactionTerminatedException( terminationReason ); + } + if ( success ) + { + // Success was called, but also failure which means that the client code using this + // transaction passed through a happy path, but the transaction was still marked as + // failed for one or more reasons. Tell the user that although it looked happy it + // wasn't committed, but was instead rolled back. + throw new TransactionFailureException( Status.Transaction.TransactionMarkedAsFailed, + "Transaction rolled back even if marked as successful" ); + } + } + + private long commit() throws TransactionFailureException + { + boolean success = false; + long txId = READ_ONLY; + + try ( CommitEvent commitEvent = transactionEvent.beginCommitEvent() ) + { + // Trigger transaction "before" hooks. + if ( hasDataChanges() ) + { + try + { + hooksState = hooks.beforeCommit( txState, this, storageReader ); + if ( hooksState != null && hooksState.failed() ) + { + Throwable cause = hooksState.failure(); + throw new TransactionFailureException( Status.Transaction.TransactionHookFailed, cause, "" ); + } + } + finally + { + beforeHookInvoked = true; + } + } + + // Convert changes into commands and commit + if ( hasChanges() ) + { + // grab all optimistic locks now, locks can't be deferred any further + statementLocks.prepareForCommit( currentStatement.lockTracer() ); + // use pessimistic locks for the rest of the commit process, locks can't be deferred any further + Locks.Client commitLocks = statementLocks.pessimistic(); + + // Gather up commands from the various sources + Collection extractedCommands = new ArrayList<>(); + storageEngine.createCommands( + extractedCommands, + txState, storageReader, + commitLocks, + lastTransactionIdWhenStarted, + this::enforceConstraints ); + if ( hasAuxTxStateChanges() ) + { + auxTxStateHolder.extractCommands( extractedCommands ); + } + + /* Here's the deal: we track a quick-to-access hasChanges in transaction state which is true + * if there are any changes imposed by this transaction. Some changes made inside a transaction undo + * previously made changes in that same transaction, and so at some point a transaction may have + * changes and at another point, after more changes seemingly, + * the transaction may not have any changes. + * However, to track that "undoing" of the changes is a bit tedious, intrusive and hard to maintain + * and get right.... So to really make sure the transaction has changes we re-check by looking if we + * have produced any commands to add to the logical log. 
+ */ + if ( !extractedCommands.isEmpty() ) + { + // Finish up the whole transaction representation + PhysicalTransactionRepresentation transactionRepresentation = + new PhysicalTransactionRepresentation( extractedCommands ); + TransactionHeaderInformation headerInformation = headerInformationFactory.create(); + long timeCommitted = clocks.systemClock().millis(); + transactionRepresentation.setHeader( headerInformation.getAdditionalHeader(), + headerInformation.getMasterId(), + headerInformation.getAuthorId(), + startTimeMillis, lastTransactionIdWhenStarted, timeCommitted, + commitLocks.getLockSessionId() ); + + // Commit the transaction + success = true; + TransactionToApply batch = new TransactionToApply( transactionRepresentation, + versionContextSupplier.getVersionContext() ); + txId = transactionId = commitProcess.commit( batch, commitEvent, INTERNAL ); + commitTime = timeCommitted; + } + } + // NOTE: pandadb + this.operations.customPropWriteTx().commit(); + // END-NOTE + success = true; + return txId; + } + catch ( ConstraintValidationException | CreateConstraintFailureException e ) + { + throw new ConstraintViolationTransactionFailureException( + e.getUserMessage( new SilentTokenNameLookup( tokenRead() ) ), e ); + } + finally + { + if ( !success ) + { + // NOTE: pandadb + this.operations.customPropWriteTx().rollback(); + // END-NOTE + rollback(); + } + else + { + afterCommit( txId ); + } + } + } + + private void rollback() throws TransactionFailureException + { + try + { + try + { + dropCreatedConstraintIndexes(); + } + catch ( IllegalStateException | SecurityException e ) + { + throw new TransactionFailureException( Status.Transaction.TransactionRollbackFailed, e, + "Could not drop created constraint indexes" ); + } + + // Free any acquired id's + if ( txState != null ) + { + try + { + txState.accept( new TxStateVisitor.Adapter() + { + @Override + public void visitCreatedNode( long id ) + { + storageReader.releaseNode( id ); + } + + @Override + public void visitCreatedRelationship( long id, int type, long startNode, long endNode ) + { + storageReader.releaseRelationship( id ); + } + } ); + } + catch ( ConstraintValidationException | CreateConstraintFailureException e ) + { + throw new IllegalStateException( + "Releasing locks during rollback should perform no constraints checking.", e ); + } + } + } + finally + { + afterRollback(); + } + } + + @Override + public Read dataRead() + { + assertAllows( AccessMode::allowsReads, "Read" ); + return operations.dataRead(); + } + + @Override + public Write dataWrite() throws InvalidTransactionTypeKernelException + { + accessCapability.assertCanWrite(); + assertAllows( AccessMode::allowsWrites, "Write" ); + upgradeToDataWrites(); + return operations; + } + + @Override + public TokenWrite tokenWrite() + { + accessCapability.assertCanWrite(); + return operations.token(); + } + + @Override + public Token token() + { + accessCapability.assertCanWrite(); + return operations.token(); + } + + @Override + public TokenRead tokenRead() + { + assertAllows( AccessMode::allowsReads, "Read" ); + return operations.token(); + } + + @Override + public ExplicitIndexRead indexRead() + { + assertAllows( AccessMode::allowsReads, "Read" ); + + return operations.indexRead(); + } + + @Override + public ExplicitIndexWrite indexWrite() throws InvalidTransactionTypeKernelException + { + accessCapability.assertCanWrite(); + assertAllows( AccessMode::allowsWrites, "Write" ); + upgradeToDataWrites(); + + return operations; + } + + @Override + public SchemaRead schemaRead() 
+ { + assertAllows( AccessMode::allowsReads, "Read" ); + return operations.schemaRead(); + } + + @Override + public SchemaWrite schemaWrite() throws InvalidTransactionTypeKernelException + { + accessCapability.assertCanWrite(); + assertAllows( AccessMode::allowsSchemaWrites, "Schema" ); + + upgradeToSchemaWrites(); + return operations; + } + + @Override + public org.neo4j.internal.kernel.api.Locks locks() + { + return operations.locks(); + } + + public StatementLocks statementLocks() + { + assertOpen(); + return statementLocks; + } + + @Override + public CursorFactory cursors() + { + return operations.cursors(); + } + + @Override + public org.neo4j.internal.kernel.api.Procedures procedures() + { + return operations.procedures(); + } + + @Override + public ExecutionStatistics executionStatistics() + { + return this; + } + + public LockTracer lockTracer() + { + return currentStatement.lockTracer(); + } + + public void assertAllows( Function allows, String mode ) + { + AccessMode accessMode = securityContext().mode(); + if ( !allows.apply( accessMode ) ) + { + throw accessMode.onViolation( + format( "%s operations are not allowed for %s.", mode, + securityContext().description() ) ); + } + } + + private void afterCommit( long txId ) + { + try + { + markAsClosed( txId ); + if ( beforeHookInvoked ) + { + hooks.afterCommit( txState, this, hooksState ); + } + } + finally + { + transactionMonitor.transactionFinished( true, hasTxStateWithChanges() ); + } + } + + private void afterRollback() + { + try + { + markAsClosed( ROLLBACK ); + if ( beforeHookInvoked ) + { + hooks.afterRollback( txState, this, hooksState ); + } + } + finally + { + transactionMonitor.transactionFinished( false, hasTxStateWithChanges() ); + } + } + + /** + * Release resources held up by this transaction & return it to the transaction pool. + * This method is guarded by {@link #terminationReleaseLock} to coordinate concurrent + * {@link #markForTermination(Status)} calls. + */ + private void release() + { + AuxiliaryTransactionStateCloseException auxStateCloseException = null; + terminationReleaseLock.lock(); + try + { + statementLocks.close(); + statementLocks = null; + terminationReason = null; + type = null; + securityContext = null; + transactionEvent = null; + if ( auxTxStateHolder != null ) + { + auxStateCloseException = closeAuxTxState(); + } + txState = null; + collectionsFactory.release(); + hooksState = null; + closeListeners.clear(); + reuseCount++; + userMetaData = emptyMap(); + userTransactionId = 0; + statistics.reset(); + operations.release(); + pool.release( this ); + } + finally + { + terminationReleaseLock.unlock(); + } + if ( auxStateCloseException != null ) + { + throw auxStateCloseException; + } + } + + private AuxiliaryTransactionStateCloseException closeAuxTxState() + { + AuxiliaryTransactionStateHolder holder = auxTxStateHolder; + auxTxStateHolder = null; + try + { + holder.close(); + } + catch ( AuxiliaryTransactionStateCloseException e ) + { + return e; + } + return null; + } + + /** + * Transaction can be terminated only when it is not closed and not already terminated. + * Otherwise termination does not make sense. 
+ */ + private boolean canBeTerminated() + { + return !closed && !isTerminated(); + } + + @Override + public boolean isTerminated() + { + return terminationReason != null; + } + + @Override + public long lastTransactionTimestampWhenStarted() + { + return lastTransactionTimestampWhenStarted; + } + + @Override + public void registerCloseListener( CloseListener listener ) + { + assert listener != null; + closeListeners.add( listener ); + } + + @Override + public Type transactionType() + { + return type; + } + + @Override + public long getTransactionId() + { + if ( transactionId == NOT_COMMITTED_TRANSACTION_ID ) + { + throw new IllegalStateException( "Transaction id is not assigned yet. " + + "It will be assigned during transaction commit." ); + } + return transactionId; + } + + @Override + public long getCommitTime() + { + if ( commitTime == NOT_COMMITTED_TRANSACTION_COMMIT_TIME ) + { + throw new IllegalStateException( "Transaction commit time is not assigned yet. " + + "It will be assigned during transaction commit." ); + } + return commitTime; + } + + @Override + public Revertable overrideWith( SecurityContext context ) + { + SecurityContext oldContext = this.securityContext; + this.securityContext = context; + return () -> this.securityContext = oldContext; + } + + @Override + public String toString() + { + String lockSessionId = statementLocks == null + ? "statementLocks == null" + : String.valueOf( statementLocks.pessimistic().getLockSessionId() ); + + return "KernelTransaction[" + lockSessionId + "]"; + } + + public void dispose() + { + storageReader.close(); + } + + /** + * This method will be invoked by concurrent threads for inspecting the locks held by this transaction. + *

+ * The fact that {@link #statementLocks} is a volatile fields, grants us enough of a read barrier to get a good + * enough snapshot of the lock state (as long as the underlying methods give us such guarantees). + * + * @return the locks held by this transaction. + */ + public Stream activeLocks() + { + StatementLocks locks = this.statementLocks; + return locks == null ? Stream.empty() : locks.activeLocks(); + } + + long userTransactionId() + { + return userTransactionId; + } + + public Statistics getStatistics() + { + return statistics; + } + + private TxStateVisitor enforceConstraints( TxStateVisitor txStateVisitor ) + { + return constraintSemantics.decorateTxStateVisitor( storageReader, operations.dataRead(), operations.cursors(), txState, txStateVisitor ); + } + + /** + * The revision of the data changes in this transaction. This number is opaque, except that it is zero if there have been no data changes in this + * transaction. And making and then undoing a change does not count as "no data changes." This number will always change when there is a data change in a + * transaction, however, such that one can reliably tell whether or not there has been any data changes in a transaction since last time the transaction + * data revision was obtained for the given transaction. + * @return The opaque data revision for this transaction, or zero if there has been no data changes in this transaction. + */ + public long getTransactionDataRevision() + { + return hasDataChanges() ? txState.getDataRevision() : 0; + } + + public static class Statistics + { + private volatile long cpuTimeNanosWhenQueryStarted; + private volatile long heapAllocatedBytesWhenQueryStarted; + private volatile long waitingTimeNanos; + private volatile long transactionThreadId; + private volatile PageCursorTracer pageCursorTracer = PageCursorTracer.NULL; + private final KernelTransactionImplementation transaction; + private final AtomicReference cpuClockRef; + private final AtomicReference heapAllocationRef; + private CpuClock cpuClock; + private HeapAllocation heapAllocation; + + public Statistics( KernelTransactionImplementation transaction, AtomicReference cpuClockRef, + AtomicReference heapAllocationRef ) + { + this.transaction = transaction; + this.cpuClockRef = cpuClockRef; + this.heapAllocationRef = heapAllocationRef; + } + + protected void init( long threadId, PageCursorTracer pageCursorTracer ) + { + this.cpuClock = cpuClockRef.get(); + this.heapAllocation = heapAllocationRef.get(); + this.transactionThreadId = threadId; + this.pageCursorTracer = pageCursorTracer; + this.cpuTimeNanosWhenQueryStarted = cpuClock.cpuTimeNanos( transactionThreadId ); + this.heapAllocatedBytesWhenQueryStarted = heapAllocation.allocatedBytes( transactionThreadId ); + } + + /** + * Returns number of allocated bytes by current transaction. + * @return number of allocated bytes by the thread. + */ + long heapAllocatedBytes() + { + return heapAllocation.allocatedBytes( transactionThreadId ) - heapAllocatedBytesWhenQueryStarted; + } + + /** + * Returns amount of direct memory allocated by current transaction. + * + * @return amount of direct memory allocated by the thread in bytes. + */ + long directAllocatedBytes() + { + return transaction.collectionsFactory.getMemoryTracker().usedDirectMemory(); + } + + /** + * Return CPU time used by current transaction in milliseconds + * @return the current CPU time used by the transaction, in milliseconds. 
+ */ + public long cpuTimeMillis() + { + long cpuTimeNanos = cpuClock.cpuTimeNanos( transactionThreadId ) - cpuTimeNanosWhenQueryStarted; + return NANOSECONDS.toMillis( cpuTimeNanos ); + } + + /** + * Return total number of page cache hits that current transaction performed + * @return total page cache hits + */ + long totalTransactionPageCacheHits() + { + return pageCursorTracer.accumulatedHits(); + } + + /** + * Return total number of page cache faults that current transaction performed + * @return total page cache faults + */ + long totalTransactionPageCacheFaults() + { + return pageCursorTracer.accumulatedFaults(); + } + + /** + * Report how long any particular query was waiting during it's execution + * @param waitTimeNanos query waiting time in nanoseconds + */ + @SuppressWarnings( "NonAtomicOperationOnVolatileField" ) + void addWaitingTime( long waitTimeNanos ) + { + waitingTimeNanos += waitTimeNanos; + } + + /** + * Accumulated transaction waiting time that includes waiting time of all already executed queries + * plus waiting time of currently executed query. + * @return accumulated transaction waiting time + * @param nowNanos current moment in nanoseconds + */ + long getWaitingTimeNanos( long nowNanos ) + { + ExecutingQueryList queryList = transaction.executingQueries(); + long waitingTime = waitingTimeNanos; + if ( queryList != null ) + { + Long latestQueryWaitingNanos = queryList.top( executingQuery -> + executingQuery.totalWaitingTimeNanos( nowNanos ) ); + waitingTime = latestQueryWaitingNanos != null ? waitingTime + latestQueryWaitingNanos : waitingTime; + } + return waitingTime; + } + + void reset() + { + pageCursorTracer = PageCursorTracer.NULL; + cpuTimeNanosWhenQueryStarted = 0; + heapAllocatedBytesWhenQueryStarted = 0; + waitingTimeNanos = 0; + transactionThreadId = -1; + } + } + + @Override + public ClockContext clocks() + { + return clocks; + } + + @Override + public NodeCursor ambientNodeCursor() + { + return operations.nodeCursor(); + } + + @Override + public RelationshipScanCursor ambientRelationshipCursor() + { + return operations.relationshipCursor(); + } + + @Override + public PropertyCursor ambientPropertyCursor() + { + return operations.propertyCursor(); + } + + /** + * It is not allowed for the same transaction to perform database writes as well as schema writes. + * This enum tracks the current write transactionStatus of the transaction, allowing it to transition from + * no writes (NONE) to data writes (DATA) or schema writes (SCHEMA), but it cannot transition between + * DATA and SCHEMA without throwing an InvalidTransactionTypeKernelException. Note that this behavior + * is orthogonal to the SecurityContext which manages what the transaction or statement is allowed to do + * based on authorization. + */ + private enum TransactionWriteState + { + NONE, + DATA + { + @Override + TransactionWriteState upgradeToSchemaWrites() throws InvalidTransactionTypeKernelException + { + throw new InvalidTransactionTypeKernelException( + "Cannot perform schema updates in a transaction that has performed data updates." ); + } + }, + SCHEMA + { + @Override + TransactionWriteState upgradeToDataWrites() throws InvalidTransactionTypeKernelException + { + throw new InvalidTransactionTypeKernelException( + "Cannot perform data updates in a transaction that has performed schema updates." 
); + } + }; + + TransactionWriteState upgradeToDataWrites() throws InvalidTransactionTypeKernelException + { + return DATA; + } + + TransactionWriteState upgradeToSchemaWrites() throws InvalidTransactionTypeKernelException + { + return SCHEMA; + } + } +} diff --git a/external-properties/src/main/java/org/neo4j/kernel/impl/newapi/Operations.java b/external-properties/src/main/java/org/neo4j/kernel/impl/newapi/Operations.java new file mode 100644 index 00000000..f350b841 --- /dev/null +++ b/external-properties/src/main/java/org/neo4j/kernel/impl/newapi/Operations.java @@ -0,0 +1,1612 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +package org.neo4j.kernel.impl.newapi; + +import cn.pandadb.externalprops.ExternalPropertiesContext; +import cn.pandadb.util.GlobalContext; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.mutable.MutableInt; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Optional; + +import org.neo4j.graphdb.factory.GraphDatabaseSettings; +import org.neo4j.internal.kernel.api.CursorFactory; +import org.neo4j.internal.kernel.api.ExplicitIndexRead; +import org.neo4j.internal.kernel.api.ExplicitIndexWrite; +import org.neo4j.internal.kernel.api.IndexQuery; +import org.neo4j.internal.kernel.api.IndexReference; +import org.neo4j.internal.kernel.api.Locks; +import org.neo4j.internal.kernel.api.NodeLabelIndexCursor; +import org.neo4j.internal.kernel.api.Procedures; +import org.neo4j.internal.kernel.api.Read; +import org.neo4j.internal.kernel.api.SchemaRead; +import org.neo4j.internal.kernel.api.SchemaWrite; +import org.neo4j.internal.kernel.api.Token; +import org.neo4j.internal.kernel.api.Write; +import org.neo4j.internal.kernel.api.exceptions.EntityNotFoundException; +import org.neo4j.internal.kernel.api.exceptions.KernelException; +import org.neo4j.internal.kernel.api.exceptions.TransactionFailureException; +import org.neo4j.internal.kernel.api.exceptions.explicitindex.AutoIndexingKernelException; +import org.neo4j.internal.kernel.api.exceptions.explicitindex.ExplicitIndexNotFoundKernelException; +import org.neo4j.internal.kernel.api.exceptions.schema.ConstraintValidationException; +import org.neo4j.internal.kernel.api.exceptions.schema.CreateConstraintFailureException; +import org.neo4j.internal.kernel.api.exceptions.schema.IndexNotApplicableKernelException; +import org.neo4j.internal.kernel.api.exceptions.schema.IndexNotFoundKernelException; +import org.neo4j.internal.kernel.api.exceptions.schema.SchemaKernelException; +import org.neo4j.internal.kernel.api.schema.IndexProviderDescriptor; +import org.neo4j.internal.kernel.api.schema.LabelSchemaDescriptor; +import org.neo4j.internal.kernel.api.schema.RelationTypeSchemaDescriptor; +import org.neo4j.internal.kernel.api.schema.SchemaDescriptor; 
+import org.neo4j.internal.kernel.api.schema.constraints.ConstraintDescriptor; +import org.neo4j.kernel.api.SilentTokenNameLookup; +import org.neo4j.kernel.api.StatementConstants; +import org.neo4j.kernel.api.exceptions.index.IndexEntryConflictException; +import org.neo4j.kernel.api.exceptions.schema.AlreadyConstrainedException; +import org.neo4j.kernel.api.exceptions.schema.AlreadyIndexedException; +import org.neo4j.kernel.api.exceptions.schema.DropConstraintFailureException; +import org.neo4j.kernel.api.exceptions.schema.DropIndexFailureException; +import org.neo4j.kernel.api.exceptions.schema.IndexBelongsToConstraintException; +import org.neo4j.kernel.api.exceptions.schema.IndexBrokenKernelException; +import org.neo4j.kernel.api.exceptions.schema.NoSuchConstraintException; +import org.neo4j.kernel.api.exceptions.schema.NoSuchIndexException; +import org.neo4j.kernel.api.exceptions.schema.RepeatedPropertyInCompositeSchemaException; +import org.neo4j.kernel.api.exceptions.schema.UnableToValidateConstraintException; +import org.neo4j.kernel.api.exceptions.schema.UniquePropertyValueValidationException; +import org.neo4j.kernel.api.explicitindex.AutoIndexing; +import org.neo4j.kernel.api.schema.constraints.ConstraintDescriptorFactory; +import org.neo4j.kernel.api.schema.constraints.IndexBackedConstraintDescriptor; +import org.neo4j.kernel.api.schema.constraints.NodeKeyConstraintDescriptor; +import org.neo4j.kernel.api.schema.constraints.UniquenessConstraintDescriptor; +import org.neo4j.kernel.api.txstate.ExplicitIndexTransactionState; +import org.neo4j.kernel.api.txstate.TransactionState; +import org.neo4j.kernel.configuration.Config; +import org.neo4j.kernel.impl.api.KernelTransactionImplementation; +import org.neo4j.kernel.impl.api.index.IndexingService; +import org.neo4j.kernel.impl.api.state.ConstraintIndexCreator; +import org.neo4j.kernel.impl.constraints.ConstraintSemantics; +import org.neo4j.kernel.impl.index.IndexEntityType; +import org.neo4j.kernel.impl.locking.ResourceTypes; +import org.neo4j.storageengine.api.EntityType; +import org.neo4j.storageengine.api.StorageReader; +import org.neo4j.storageengine.api.lock.ResourceType; +import org.neo4j.storageengine.api.schema.IndexDescriptor; +import org.neo4j.storageengine.api.schema.IndexDescriptorFactory; +import org.neo4j.values.storable.Value; +import org.neo4j.values.storable.Values; + +import static java.lang.Math.max; +import static java.lang.Math.min; +import static org.neo4j.internal.kernel.api.exceptions.schema.ConstraintValidationException.Phase.VALIDATION; +import static org.neo4j.internal.kernel.api.exceptions.schema.SchemaKernelException.OperationContext.CONSTRAINT_CREATION; +import static org.neo4j.internal.kernel.api.schema.SchemaDescriptor.schemaTokenLockingIds; +import static org.neo4j.kernel.api.StatementConstants.NO_SUCH_LABEL; +import static org.neo4j.kernel.api.StatementConstants.NO_SUCH_NODE; +import static org.neo4j.kernel.api.StatementConstants.NO_SUCH_PROPERTY_KEY; +import static org.neo4j.kernel.impl.locking.ResourceTypes.INDEX_ENTRY; +import static org.neo4j.kernel.impl.locking.ResourceTypes.indexEntryResourceId; +import static org.neo4j.kernel.impl.newapi.IndexTxStateUpdater.LabelChangeType.ADDED_LABEL; +import static org.neo4j.kernel.impl.newapi.IndexTxStateUpdater.LabelChangeType.REMOVED_LABEL; +import static org.neo4j.storageengine.api.EntityType.NODE; +import static org.neo4j.storageengine.api.schema.IndexDescriptor.Type.UNIQUE; +import static org.neo4j.values.storable.Values.NO_VALUE; + +// NOTE: pandadb 
+import org.neo4j.internal.kernel.api.exceptions.LabelNotFoundKernelException; +import org.neo4j.internal.kernel.api.exceptions.PropertyKeyIdNotFoundKernelException; +import cn.pandadb.externalprops.CustomPropertyNodeStore; +import cn.pandadb.externalprops.PropertyWriteTransaction; +import cn.pandadb.externalprops.NodeWithProperties; +import org.neo4j.values.virtual.NodeValue; +import scala.Option; +import scala.collection.mutable.Undoable; +// END-NOTE + +/** + * Collects all Kernel API operations and guards them from being used outside of transaction. + * + * Many methods assume cursors to be initialized before use in private methods, even if they're not passed in explicitly. + * Keep that in mind: e.g. nodeCursor, propertyCursor and relationshipCursor + */ +public class Operations implements Write, ExplicitIndexWrite, SchemaWrite +{ + private static final int[] EMPTY_INT_ARRAY = new int[0]; + + private final KernelTransactionImplementation ktx; + private final AllStoreHolder allStoreHolder; + private final KernelToken token; + private final StorageReader statement; + private final AutoIndexing autoIndexing; + private final IndexTxStateUpdater updater; + private final DefaultCursors cursors; + private final ConstraintIndexCreator constraintIndexCreator; + private final ConstraintSemantics constraintSemantics; + private final IndexingService indexingService; + private final Config config; + private DefaultNodeCursor nodeCursor; + private DefaultPropertyCursor propertyCursor; + private DefaultRelationshipScanCursor relationshipCursor; + + // NOTE: pandadb + private CustomPropertyWriteTransactionFacade customPropWriteTx; + // END-NOTE + + public Operations( AllStoreHolder allStoreHolder, IndexTxStateUpdater updater, StorageReader statement, KernelTransactionImplementation ktx, + KernelToken token, DefaultCursors cursors, AutoIndexing autoIndexing, ConstraintIndexCreator constraintIndexCreator, + ConstraintSemantics constraintSemantics, IndexingService indexingService, Config config ) + { + this.token = token; + this.autoIndexing = autoIndexing; + this.allStoreHolder = allStoreHolder; + this.ktx = ktx; + this.statement = statement; + this.updater = updater; + this.cursors = cursors; + this.constraintIndexCreator = constraintIndexCreator; + this.constraintSemantics = constraintSemantics; + this.indexingService = indexingService; + this.config = config; + // NOTE: pandadb + this.customPropWriteTx = new CustomPropertyWriteTransactionFacade(); + // END-NOTE + } + + public void initialize() + { + this.nodeCursor = cursors.allocateNodeCursor(); + this.propertyCursor = cursors.allocatePropertyCursor(); + this.relationshipCursor = cursors.allocateRelationshipScanCursor(); + + // NOTE: pandadb + this.customPropWriteTx = new CustomPropertyWriteTransactionFacade(); + // END-NOTE + } + + // NOTE: pandadb + public CustomPropertyWriteTransactionFacade customPropWriteTx() + { + return this.customPropWriteTx; + } + + public class CustomPropertyWriteTransactionFacade + { + private Option customPropertyStore; + private PropertyWriteTransaction customPropWrTx; + private Undoable commitedTxRes; + + public CustomPropertyWriteTransactionFacade() + { + this.customPropertyStore = ExternalPropertiesContext.maybeCustomPropertyNodeStore(); + if (this.isLeaderNode() && this.customPropertyStore.isDefined()) + { + this.customPropWrTx = this.customPropertyStore.get().beginWriteTransaction(); + } + + } + + private boolean isLeaderNode() + { + return GlobalContext.isLeaderNode(); + } + + private String getNodeLabelName(int 
label) + { + try + { + return token().nodeLabelName(label); + } + catch ( LabelNotFoundKernelException e ) + { + throw new IllegalStateException( "Label retrieved through kernel API should exist.", e ); + } + } + + private String getPropertyKeyName(int property) + { + try + { + return token().propertyKeyName(property); + } + catch ( PropertyKeyIdNotFoundKernelException e ) + { + throw new IllegalStateException( "Property key retrieved through kernel API should exist.", e ); + } + } + + private boolean isSavePropertyToCustom() + { + return this.isLeaderNode() && this.customPropWrTx != null; + } + + public boolean isPreventNeo4jPropStore() + { + return this.customPropertyStore.isDefined(); + } + + public void nodeCreate(long nodeId) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.addNode(nodeId); + } + } + + public Value nodeGetProperty(long nodeId, int propertyKeyId) + { + return this.nodeGetProperty(nodeId, getPropertyKeyName(propertyKeyId)); + } + + public Value nodeGetProperty(long nodeId, String propertyKey) + { + if (this.isPreventNeo4jPropStore()) { + Option rs = this.customPropWrTx.getPropertyValue(nodeId, propertyKey); + if (rs.isDefined()) { + return rs.get(); + } + } + return NO_VALUE; + } + + /* + public NodeValue getNode(long nodeId) + { + if (this.isPreventNeo4jPropStore()) { + NodeWithProperties node = this.customPropertyStore.get().getNodeById(nodeId).get(); + return node.toNeo4jNodeValue(); + } + return null; + } + */ + + public void nodeDelete(long nodeId) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.deleteNode(nodeId); + } + } + + public void nodeCreateWithLabelNames(long nodeId, Iterable labels) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.addNode(nodeId); + for (String label: labels){ + this.customPropWrTx.addLabel(nodeId,label); + } + } + } + + public void nodeCreateWithLabels(long nodeId, Iterable labels) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.addNode(nodeId); + for (int labelId : labels) { + this.customPropWrTx.addLabel(nodeId, getNodeLabelName(labelId)); + } + } + } + + public void nodeSetLabel(long nodeId, int label) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.addLabel(nodeId, getNodeLabelName(label)); + } + } + + public void nodeRemoveLabel(long nodeId, int label) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.removeLabel(nodeId, getNodeLabelName(label)); + } + } + + public void nodeSetProperty(long nodeId, int property, Value value) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.addProperty(nodeId, getPropertyKeyName(property), value); + } + } + + public void nodeRemoveProperty(long nodeId, int property) + { + if (this.isSavePropertyToCustom()) { + this.customPropWrTx.removeProperty(nodeId, getPropertyKeyName(property)); + } + } + + public void commit() + { + if (this.isSavePropertyToCustom()) { + this.commitedTxRes = this.customPropWrTx.commit(); + } + } + + public void rollback() + { + if (this.customPropWrTx != null) { + this.customPropWrTx.rollback(); + } + } + + public void undo() + { + if (this.isSavePropertyToCustom()) { + if (this.commitedTxRes != null) { + this.commitedTxRes.undo(); + } + } + } + + public void close() + { + if (this.customPropWrTx != null) { + this.customPropWrTx.close(); + } + } + } + // END-NOTE + + @Override + public long nodeCreate() + { + ktx.assertOpen(); + long nodeId = statement.reserveNode(); + ktx.txState().nodeDoCreate( nodeId ); + // NOTE: pandadb + 
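+        // Mirrors the node creation into the external property store: the facade forwards the new
+        // node id to the external write transaction only when this instance is the leader and an
+        // external property store is configured (see isSavePropertyToCustom()).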
this.customPropWriteTx.nodeCreate(nodeId); + // END-NOTE + return nodeId; + } + + @Override + public long nodeCreateWithLabels( int[] labels ) throws ConstraintValidationException + { + if ( labels == null || labels.length == 0 ) + { + return nodeCreate(); + } + + // We don't need to check the node for existence, like we do in nodeAddLabel, because we just created it. + // We also don't need to check if the node already has some of the labels, because we know it has none. + // And we don't need to take the exclusive lock on the node, because it was created in this transaction and + // isn't visible to anyone else yet. + ktx.assertOpen(); + long[] lockingIds = SchemaDescriptor.schemaTokenLockingIds( labels ); + Arrays.sort( lockingIds ); // Sort to ensure labels are locked and assigned in order. + ktx.statementLocks().optimistic().acquireShared( ktx.lockTracer(), ResourceTypes.LABEL, lockingIds ); + long nodeId = statement.reserveNode(); + ktx.txState().nodeDoCreate( nodeId ); + // NOTE: pandadb + this.customPropWriteTx.nodeCreate(nodeId); + // END-NOTE + nodeCursor.single( nodeId, allStoreHolder ); + nodeCursor.next(); + + int prevLabel = NO_SUCH_LABEL; + for ( long lockingId : lockingIds ) + { + int label = (int) lockingId; + if ( label != prevLabel ) // Filter out duplicates. + { + checkConstraintsAndAddLabelToNode( nodeId, label ); + prevLabel = label; + } + } + return nodeId; + } + + @Override + public boolean nodeDelete( long node ) throws AutoIndexingKernelException + { + ktx.assertOpen(); + return nodeDelete( node, true ); + } + + @Override + public int nodeDetachDelete( final long nodeId ) throws KernelException + { + final MutableInt count = new MutableInt(); + TwoPhaseNodeForRelationshipLocking locking = new TwoPhaseNodeForRelationshipLocking( + relId -> + { + ktx.assertOpen(); + if ( relationshipDelete( relId, false ) ) + { + count.increment(); + } + }, ktx.statementLocks().optimistic(), ktx.lockTracer() ); + + locking.lockAllNodesAndConsumeRelationships( nodeId, ktx, ktx.ambientNodeCursor() ); + ktx.assertOpen(); + + //we are already holding the lock + nodeDelete( nodeId, false ); + return count.intValue(); + } + + @Override + public long relationshipCreate( long sourceNode, int relationshipType, long targetNode ) + throws EntityNotFoundException + { + ktx.assertOpen(); + + sharedSchemaLock( ResourceTypes.RELATIONSHIP_TYPE, relationshipType ); + lockRelationshipNodes( sourceNode, targetNode ); + + assertNodeExists( sourceNode ); + assertNodeExists( targetNode ); + + long id = statement.reserveRelationship(); + ktx.txState().relationshipDoCreate( id, relationshipType, sourceNode, targetNode ); + return id; + } + + @Override + public boolean relationshipDelete( long relationship ) throws AutoIndexingKernelException + { + ktx.assertOpen(); + return relationshipDelete( relationship, true ); + } + + @Override + public boolean nodeAddLabel( long node, int nodeLabel ) + throws EntityNotFoundException, ConstraintValidationException + { + sharedSchemaLock( ResourceTypes.LABEL, nodeLabel ); + acquireExclusiveNodeLock( node ); + + ktx.assertOpen(); + singleNode( node ); + + if ( nodeCursor.hasLabel( nodeLabel ) ) + { + //label already there, nothing to do + return false; + } + + checkConstraintsAndAddLabelToNode( node, nodeLabel ); + return true; + } + + private void checkConstraintsAndAddLabelToNode( long node, int nodeLabel ) + throws UniquePropertyValueValidationException, UnableToValidateConstraintException + { + // Load the property key id list for this node. 
We may need it for constraint validation if there are any related constraints, + // but regardless we need it for tx state updating + int[] existingPropertyKeyIds = loadSortedPropertyKeyList(); + + //Check so that we are not breaking uniqueness constraints + //We do this by checking if there is an existing node in the index that + //with the same label and property combination. + if ( existingPropertyKeyIds.length > 0 ) + { + for ( IndexBackedConstraintDescriptor uniquenessConstraint : indexingService.getRelatedUniquenessConstraints( new long[]{nodeLabel}, + existingPropertyKeyIds, NODE ) ) + { + IndexQuery.ExactPredicate[] propertyValues = getAllPropertyValues( uniquenessConstraint.schema(), + StatementConstants.NO_SUCH_PROPERTY_KEY, Values.NO_VALUE ); + if ( propertyValues != null ) + { + validateNoExistingNodeWithExactValues( uniquenessConstraint, propertyValues, node ); + } + } + } + + // NOTE: pandadb + this.customPropWriteTx.nodeSetLabel(node, nodeLabel); + // END-NOTE + //node is there and doesn't already have the label, let's add + ktx.txState().nodeDoAddLabel( nodeLabel, node ); + updater.onLabelChange( nodeLabel, existingPropertyKeyIds, nodeCursor, propertyCursor, ADDED_LABEL ); + } + + private int[] loadSortedPropertyKeyList() + { + nodeCursor.properties( propertyCursor ); + if ( !propertyCursor.next() ) + { + return EMPTY_INT_ARRAY; + } + + int[] propertyKeyIds = new int[4]; // just some arbitrary starting point, it grows on demand + int cursor = 0; + do + { + if ( cursor == propertyKeyIds.length ) + { + propertyKeyIds = Arrays.copyOf( propertyKeyIds, cursor * 2 ); + } + propertyKeyIds[cursor++] = propertyCursor.propertyKey(); + } + while ( propertyCursor.next() ); + if ( cursor != propertyKeyIds.length ) + { + propertyKeyIds = Arrays.copyOf( propertyKeyIds, cursor ); + } + Arrays.sort( propertyKeyIds ); + return propertyKeyIds; + } + + private boolean nodeDelete( long node, boolean lock ) throws AutoIndexingKernelException + { + ktx.assertOpen(); + + if ( ktx.hasTxStateWithChanges() ) + { + if ( ktx.txState().nodeIsAddedInThisTx( node ) ) + { + autoIndexing.nodes().entityRemoved( this, node ); + // NOTE: pandadb + this.customPropWriteTx.nodeDelete(node); + // END-NOTE + ktx.txState().nodeDoDelete( node ); + return true; + } + if ( ktx.txState().nodeIsDeletedInThisTx( node ) ) + { + // already deleted + return false; + } + } + + if ( lock ) + { + ktx.statementLocks().optimistic().acquireExclusive( ktx.lockTracer(), ResourceTypes.NODE, node ); + } + + allStoreHolder.singleNode( node, nodeCursor ); + if ( nodeCursor.next() ) + { + acquireSharedNodeLabelLocks(); + + autoIndexing.nodes().entityRemoved( this, node ); + // NOTE: pandadb + this.customPropWriteTx.nodeDelete(node); + // END-NOTE + ktx.txState().nodeDoDelete( node ); + return true; + } + + // tried to delete node that does not exist + return false; + } + + /** + * Assuming that the nodeCursor have been initialized to the node that labels are retrieved from + */ + private long[] acquireSharedNodeLabelLocks() + { + long[] labels = nodeCursor.labels().all(); + ktx.statementLocks().optimistic().acquireShared( ktx.lockTracer(), ResourceTypes.LABEL, labels ); + return labels; + } + + private boolean relationshipDelete( long relationship, boolean lock ) throws AutoIndexingKernelException + { + allStoreHolder.singleRelationship( relationship, relationshipCursor ); // tx-state aware + + if ( relationshipCursor.next() ) + { + if ( lock ) + { + lockRelationshipNodes( relationshipCursor.sourceNodeReference(), + 
relationshipCursor.targetNodeReference() ); + acquireExclusiveRelationshipLock( relationship ); + } + if ( !allStoreHolder.relationshipExists( relationship ) ) + { + return false; + } + + ktx.assertOpen(); + + autoIndexing.relationships().entityRemoved( this, relationship ); + + TransactionState txState = ktx.txState(); + if ( txState.relationshipIsAddedInThisTx( relationship ) ) + { + txState.relationshipDoDeleteAddedInThisTx( relationship ); + } + else + { + txState.relationshipDoDelete( relationship, relationshipCursor.type(), + relationshipCursor.sourceNodeReference(), relationshipCursor.targetNodeReference() ); + } + return true; + } + + // tried to delete relationship that does not exist + return false; + } + + private void singleNode( long node ) throws EntityNotFoundException + { + allStoreHolder.singleNode( node, nodeCursor ); + if ( !nodeCursor.next() ) + { + throw new EntityNotFoundException( NODE, node ); + } + } + + private void singleRelationship( long relationship ) throws EntityNotFoundException + { + allStoreHolder.singleRelationship( relationship, relationshipCursor ); + if ( !relationshipCursor.next() ) + { + throw new EntityNotFoundException( EntityType.RELATIONSHIP, relationship ); + } + } + + /** + * Fetch the property values for all properties in schema for a given node. Return these as an exact predicate + * array. + */ + private IndexQuery.ExactPredicate[] getAllPropertyValues( SchemaDescriptor schema, int changedPropertyKeyId, + Value changedValue ) + { + int[] schemaPropertyIds = schema.getPropertyIds(); + IndexQuery.ExactPredicate[] values = new IndexQuery.ExactPredicate[schemaPropertyIds.length]; + + int nMatched = 0; + nodeCursor.properties( propertyCursor ); + while ( propertyCursor.next() ) + { + int nodePropertyId = propertyCursor.propertyKey(); + int k = ArrayUtils.indexOf( schemaPropertyIds, nodePropertyId ); + if ( k >= 0 ) + { + if ( nodePropertyId != StatementConstants.NO_SUCH_PROPERTY_KEY ) + { + values[k] = IndexQuery.exact( nodePropertyId, propertyCursor.propertyValue() ); + } + nMatched++; + } + } + + //This is true if we are adding a property + if ( changedPropertyKeyId != NO_SUCH_PROPERTY_KEY ) + { + int k = ArrayUtils.indexOf( schemaPropertyIds, changedPropertyKeyId ); + if ( k >= 0 ) + { + values[k] = IndexQuery.exact( changedPropertyKeyId, changedValue ); + nMatched++; + } + } + + if ( nMatched < values.length ) + { + return null; + } + return values; + } + + /** + * Check so that there is not an existing node with the exact match of label and property + */ + private void validateNoExistingNodeWithExactValues( IndexBackedConstraintDescriptor constraint, + IndexQuery.ExactPredicate[] propertyValues, long modifiedNode + ) throws UniquePropertyValueValidationException, UnableToValidateConstraintException + { + IndexDescriptor schemaIndexDescriptor = constraint.ownedIndexDescriptor(); + IndexReference indexReference = allStoreHolder.indexGetCapability( schemaIndexDescriptor ); + try ( DefaultNodeValueIndexCursor valueCursor = cursors.allocateNodeValueIndexCursor(); + IndexReaders indexReaders = new IndexReaders( indexReference, allStoreHolder ) ) + { + assertIndexOnline( schemaIndexDescriptor ); + int labelId = schemaIndexDescriptor.schema().keyId(); + + //Take a big fat lock, and check for existing node in index + ktx.statementLocks().optimistic().acquireExclusive( + ktx.lockTracer(), INDEX_ENTRY, + indexEntryResourceId( labelId, propertyValues ) + ); + + allStoreHolder.nodeIndexSeekWithFreshIndexReader( valueCursor, indexReaders.createReader(), 
propertyValues ); + if ( valueCursor.next() && valueCursor.nodeReference() != modifiedNode ) + { + throw new UniquePropertyValueValidationException( constraint, VALIDATION, + new IndexEntryConflictException( valueCursor.nodeReference(), NO_SUCH_NODE, + IndexQuery.asValueTuple( propertyValues ) ) ); + } + } + catch ( IndexNotFoundKernelException | IndexBrokenKernelException | IndexNotApplicableKernelException e ) + { + throw new UnableToValidateConstraintException( constraint, e ); + } + } + + private void assertIndexOnline( IndexDescriptor descriptor ) + throws IndexNotFoundKernelException, IndexBrokenKernelException + { + switch ( allStoreHolder.indexGetState( descriptor ) ) + { + case ONLINE: + return; + default: + throw new IndexBrokenKernelException( allStoreHolder.indexGetFailure( descriptor ) ); + } + } + + @Override + public boolean nodeRemoveLabel( long node, int labelId ) throws EntityNotFoundException + { + acquireExclusiveNodeLock( node ); + ktx.assertOpen(); + + singleNode( node ); + + if ( !nodeCursor.hasLabel( labelId ) ) + { + //the label wasn't there, nothing to do + return false; + } + + sharedSchemaLock( ResourceTypes.LABEL, labelId ); + + // NOTE: pandadb + this.customPropWriteTx.nodeRemoveLabel(node, labelId); + // END-NOTE + + ktx.txState().nodeDoRemoveLabel( labelId, node ); + if ( indexingService.hasRelatedSchema( labelId, NODE ) ) + { + updater.onLabelChange( labelId, loadSortedPropertyKeyList(), nodeCursor, propertyCursor, REMOVED_LABEL ); + } + return true; + } + + @Override + public Value nodeSetProperty( long node, int propertyKey, Value value ) + throws EntityNotFoundException, ConstraintValidationException, AutoIndexingKernelException + { + acquireExclusiveNodeLock( node ); + ktx.assertOpen(); + + singleNode( node ); + long[] labels = acquireSharedNodeLabelLocks(); + Value existingValue = readNodeProperty( propertyKey ); + int[] existingPropertyKeyIds = null; + boolean hasRelatedSchema = indexingService.hasRelatedSchema( labels, propertyKey, NODE ); + if ( hasRelatedSchema ) + { + existingPropertyKeyIds = loadSortedPropertyKeyList(); + } + + if ( hasRelatedSchema && !existingValue.equals( value ) ) + { + // The value changed and there may be relevant constraints to check so let's check those now. + Collection uniquenessConstraints = indexingService.getRelatedUniquenessConstraints( labels, propertyKey, NODE ); + NodeSchemaMatcher.onMatchingSchema( uniquenessConstraints.iterator(), propertyKey, existingPropertyKeyIds, + uniquenessConstraint -> + { + validateNoExistingNodeWithExactValues( uniquenessConstraint, getAllPropertyValues( uniquenessConstraint.schema(), propertyKey, value ), + node ); + }); + } + + if ( existingValue == NO_VALUE ) + { + //no existing value, we just add it + autoIndexing.nodes().propertyAdded( this, node, propertyKey, value ); + + // NOTE: pandadb + this.customPropWriteTx.nodeSetProperty(node, propertyKey, value); + //if(!this.customPropWriteTx.isPreventNeo4jPropStore()) + //{ + // ktx.txState().nodeDoAddProperty( node, propertyKey, value ); + // } + // END-NOTE + ktx.txState().nodeDoAddProperty( node, propertyKey, value ); + + if ( hasRelatedSchema ) + { + updater.onPropertyAdd( nodeCursor, propertyCursor, labels, propertyKey, existingPropertyKeyIds, value ); + } + return NO_VALUE; + } + else + { + // We need to auto-index even if not actually changing the value. 
+ autoIndexing.nodes().propertyChanged( this, node, propertyKey, existingValue, value ); + if ( propertyHasChanged( value, existingValue ) ) + { + //the value has changed to a new value + + // NOTE: pandadb + this.customPropWriteTx.nodeSetProperty(node, propertyKey, value); + // if(!this.customPropWriteTx.isPreventNeo4jPropStore()) + // { + // ktx.txState().nodeDoAddProperty( node, propertyKey, value ); + // } + // END-NOTE + ktx.txState().nodeDoChangeProperty( node, propertyKey, value ); + + if ( hasRelatedSchema ) + { + updater.onPropertyChange( nodeCursor, propertyCursor, labels, propertyKey, existingPropertyKeyIds, existingValue, value ); + } + } + return existingValue; + } + } + + @Override + public Value nodeRemoveProperty( long node, int propertyKey ) + throws EntityNotFoundException, AutoIndexingKernelException + { + acquireExclusiveNodeLock( node ); + ktx.assertOpen(); + singleNode( node ); + + // NOTE: pandadb + this.customPropWriteTx.nodeRemoveProperty(node, propertyKey); + // END-NOTE + + Value existingValue = readNodeProperty( propertyKey ); + + if ( existingValue != NO_VALUE ) + { + long[] labels = acquireSharedNodeLabelLocks(); + autoIndexing.nodes().propertyRemoved( this, node, propertyKey ); + ktx.txState().nodeDoRemoveProperty( node, propertyKey ); + if ( indexingService.hasRelatedSchema( labels, propertyKey, NODE ) ) + { + updater.onPropertyRemove( nodeCursor, propertyCursor, labels, propertyKey, loadSortedPropertyKeyList(), existingValue ); + } + } + + return existingValue; + } + + @Override + public Value relationshipSetProperty( long relationship, int propertyKey, Value value ) + throws EntityNotFoundException, AutoIndexingKernelException + { + acquireExclusiveRelationshipLock( relationship ); + ktx.assertOpen(); + singleRelationship( relationship ); + Value existingValue = readRelationshipProperty( propertyKey ); + if ( existingValue == NO_VALUE ) + { + autoIndexing.relationships().propertyAdded( this, relationship, propertyKey, value ); + ktx.txState().relationshipDoReplaceProperty( relationship, propertyKey, NO_VALUE, value ); + return NO_VALUE; + } + else + { + // We need to auto-index even if not actually changing the value. 
+ autoIndexing.relationships().propertyChanged( this, relationship, propertyKey, existingValue, value ); + if ( propertyHasChanged( existingValue, value ) ) + { + + ktx.txState().relationshipDoReplaceProperty( relationship, propertyKey, existingValue, value ); + } + + return existingValue; + } + } + + @Override + public Value relationshipRemoveProperty( long relationship, int propertyKey ) + throws EntityNotFoundException, AutoIndexingKernelException + { + acquireExclusiveRelationshipLock( relationship ); + ktx.assertOpen(); + singleRelationship( relationship ); + Value existingValue = readRelationshipProperty( propertyKey ); + + if ( existingValue != NO_VALUE ) + { + autoIndexing.relationships().propertyRemoved( this, relationship, propertyKey ); + ktx.txState().relationshipDoRemoveProperty( relationship, propertyKey ); + } + + return existingValue; + } + + @Override + public Value graphSetProperty( int propertyKey, Value value ) + { + ktx.statementLocks().optimistic() + .acquireExclusive( ktx.lockTracer(), ResourceTypes.GRAPH_PROPS, ResourceTypes.graphPropertyResource() ); + ktx.assertOpen(); + + Value existingValue = readGraphProperty( propertyKey ); + if ( !existingValue.equals( value ) ) + { + ktx.txState().graphDoReplaceProperty( propertyKey, existingValue, value ); + } + return existingValue; + } + + @Override + public Value graphRemoveProperty( int propertyKey ) + { + ktx.statementLocks().optimistic() + .acquireExclusive( ktx.lockTracer(), ResourceTypes.GRAPH_PROPS, ResourceTypes.graphPropertyResource() ); + ktx.assertOpen(); + Value existingValue = readGraphProperty( propertyKey ); + if ( existingValue != Values.NO_VALUE ) + { + ktx.txState().graphDoRemoveProperty( propertyKey ); + } + return existingValue; + } + + @Override + public void nodeAddToExplicitIndex( String indexName, long node, String key, Object value ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().nodeChanges( indexName ).addNode( node, key, value ); + } + + @Override + public void nodeRemoveFromExplicitIndex( String indexName, long node ) throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().nodeChanges( indexName ).remove( node ); + } + + @Override + public void nodeRemoveFromExplicitIndex( String indexName, long node, String key, Object value ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().nodeChanges( indexName ).remove( node, key, value ); + } + + @Override + public void nodeRemoveFromExplicitIndex( String indexName, long node, String key ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().nodeChanges( indexName ).remove( node, key ); + } + + @Override + public void nodeExplicitIndexCreate( String indexName, Map customConfig ) + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().createIndex( IndexEntityType.Node, indexName, customConfig ); + } + + @Override + public void nodeExplicitIndexCreateLazily( String indexName, Map customConfig ) + { + ktx.assertOpen(); + allStoreHolder.getOrCreateNodeIndexConfig( indexName, customConfig ); + } + + @Override + public void nodeExplicitIndexDrop( String indexName ) throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + ExplicitIndexTransactionState txState = allStoreHolder.explicitIndexTxState(); + txState.nodeChanges( indexName ).drop(); + txState.deleteIndex( IndexEntityType.Node, indexName ); + } + + 
@Override + public String nodeExplicitIndexSetConfiguration( String indexName, String key, String value ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + return allStoreHolder.explicitIndexStore().setNodeIndexConfiguration( indexName, key, value ); + } + + @Override + public String nodeExplicitIndexRemoveConfiguration( String indexName, String key ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + return allStoreHolder.explicitIndexStore().removeNodeIndexConfiguration( indexName, key ); + } + + @Override + public void relationshipAddToExplicitIndex( String indexName, long relationship, String key, Object value ) + throws ExplicitIndexNotFoundKernelException, EntityNotFoundException + { + ktx.assertOpen(); + allStoreHolder.singleRelationship( relationship, relationshipCursor ); + if ( relationshipCursor.next() ) + { + allStoreHolder.explicitIndexTxState().relationshipChanges( indexName ).addRelationship( relationship, key, value, + relationshipCursor.sourceNodeReference(), relationshipCursor.targetNodeReference() ); + } + else + { + throw new EntityNotFoundException( EntityType.RELATIONSHIP, relationship ); + } + } + + @Override + public void relationshipRemoveFromExplicitIndex( String indexName, long relationship, String key, Object value ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().relationshipChanges( indexName ).remove( relationship, key, value ); + } + + @Override + public void relationshipRemoveFromExplicitIndex( String indexName, long relationship, String key ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().relationshipChanges( indexName ).remove( relationship, key ); + + } + + @Override + public void relationshipRemoveFromExplicitIndex( String indexName, long relationship ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().relationshipChanges( indexName ).remove( relationship ); + } + + @Override + public void relationshipExplicitIndexCreate( String indexName, Map customConfig ) + { + ktx.assertOpen(); + allStoreHolder.explicitIndexTxState().createIndex( IndexEntityType.Relationship, indexName, customConfig ); + } + + @Override + public void relationshipExplicitIndexCreateLazily( String indexName, Map customConfig ) + { + ktx.assertOpen(); + allStoreHolder.getOrCreateRelationshipIndexConfig( indexName, customConfig ); + } + + @Override + public void relationshipExplicitIndexDrop( String indexName ) throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + ExplicitIndexTransactionState txState = allStoreHolder.explicitIndexTxState(); + txState.relationshipChanges( indexName ).drop(); + txState.deleteIndex( IndexEntityType.Relationship, indexName ); + } + + private Value readNodeProperty( int propertyKey ) + { + nodeCursor.properties( propertyCursor ); + + //Find out if the property had a value + Value existingValue = NO_VALUE; + while ( propertyCursor.next() ) + { + if ( propertyCursor.propertyKey() == propertyKey ) + { + existingValue = propertyCursor.propertyValue(); + break; + } + } + return existingValue; + } + + private Value readRelationshipProperty( int propertyKey ) + { + relationshipCursor.properties( propertyCursor ); + + //Find out if the property had a value + Value existingValue = NO_VALUE; + while ( propertyCursor.next() ) + { + if ( propertyCursor.propertyKey() == propertyKey ) + { + existingValue = 
propertyCursor.propertyValue(); + break; + } + } + return existingValue; + } + + private Value readGraphProperty( int propertyKey ) + { + allStoreHolder.graphProperties( propertyCursor ); + + //Find out if the property had a value + Value existingValue = NO_VALUE; + while ( propertyCursor.next() ) + { + if ( propertyCursor.propertyKey() == propertyKey ) + { + existingValue = propertyCursor.propertyValue(); + break; + } + } + return existingValue; + } + + public CursorFactory cursors() + { + return cursors; + } + + public Procedures procedures() + { + return allStoreHolder; + } + + public void release() + { + if ( nodeCursor != null ) + { + nodeCursor.close(); + nodeCursor = null; + } + if ( propertyCursor != null ) + { + propertyCursor.close(); + propertyCursor = null; + } + if ( relationshipCursor != null ) + { + relationshipCursor.close(); + relationshipCursor = null; + } + + cursors.assertClosed(); + cursors.release(); + } + + public Token token() + { + return token; + } + + public ExplicitIndexRead indexRead() + { + return allStoreHolder; + } + + public SchemaRead schemaRead() + { + return allStoreHolder; + } + + public Read dataRead() + { + return allStoreHolder; + } + + public DefaultNodeCursor nodeCursor() + { + return nodeCursor; + } + + public DefaultRelationshipScanCursor relationshipCursor() + { + return relationshipCursor; + } + + public DefaultPropertyCursor propertyCursor() + { + return propertyCursor; + } + + @Override + public IndexReference indexCreate( SchemaDescriptor descriptor ) throws SchemaKernelException + { + return indexCreate( descriptor, config.get( GraphDatabaseSettings.default_schema_provider ), Optional.empty() ); + } + + @Override + public IndexReference indexCreate( SchemaDescriptor descriptor, Optional indexName ) throws SchemaKernelException + { + return indexCreate( descriptor, config.get( GraphDatabaseSettings.default_schema_provider ), indexName ); + } + + @Override + public IndexReference indexCreate( SchemaDescriptor descriptor, + String provider, + Optional name ) throws SchemaKernelException + { + exclusiveSchemaLock( descriptor ); + ktx.assertOpen(); + assertValidDescriptor( descriptor, SchemaKernelException.OperationContext.INDEX_CREATION ); + assertIndexDoesNotExist( SchemaKernelException.OperationContext.INDEX_CREATION, descriptor, name ); + + IndexProviderDescriptor providerDescriptor = indexingService.indexProviderByName( provider ); + IndexDescriptor index = IndexDescriptorFactory.forSchema( descriptor, name, providerDescriptor ); + index = indexingService.getBlessedDescriptorFromProvider( index ); + ktx.txState().indexDoAdd( index ); + return index; + } + + // Note: this will be sneakily executed by an internal transaction, so no additional locking is required. 
+ public IndexDescriptor indexUniqueCreate( SchemaDescriptor schema, String provider ) throws SchemaKernelException + { + IndexProviderDescriptor providerDescriptor = indexingService.indexProviderByName( provider ); + IndexDescriptor index = + IndexDescriptorFactory.uniqueForSchema( schema, + Optional.empty(), + providerDescriptor ); + index = indexingService.getBlessedDescriptorFromProvider( index ); + ktx.txState().indexDoAdd( index ); + return index; + } + + @Override + public void indexDrop( IndexReference indexReference ) throws SchemaKernelException + { + assertValidIndex( indexReference ); + IndexDescriptor index = (IndexDescriptor) indexReference; + SchemaDescriptor schema = index.schema(); + + exclusiveSchemaLock( schema ); + ktx.assertOpen(); + try + { + IndexDescriptor existingIndex = allStoreHolder.indexGetForSchema( schema ); + + if ( existingIndex == null ) + { + throw new NoSuchIndexException( schema ); + } + + if ( existingIndex.type() == UNIQUE ) + { + if ( allStoreHolder.indexGetOwningUniquenessConstraintId( existingIndex ) != null ) + { + throw new IndexBelongsToConstraintException( schema ); + } + } + } + catch ( IndexBelongsToConstraintException | NoSuchIndexException e ) + { + throw new DropIndexFailureException( schema, e ); + } + ktx.txState().indexDoDrop( index ); + } + + @Override + public ConstraintDescriptor uniquePropertyConstraintCreate( SchemaDescriptor descriptor ) + throws SchemaKernelException + { + return uniquePropertyConstraintCreate( descriptor, config.get( GraphDatabaseSettings.default_schema_provider ) ); + } + + @Override + public ConstraintDescriptor uniquePropertyConstraintCreate( SchemaDescriptor descriptor, String provider ) + throws SchemaKernelException + { + //Lock + exclusiveSchemaLock( descriptor ); + ktx.assertOpen(); + + //Check data integrity + assertValidDescriptor( descriptor, SchemaKernelException.OperationContext.CONSTRAINT_CREATION ); + UniquenessConstraintDescriptor constraint = ConstraintDescriptorFactory.uniqueForSchema( descriptor ); + assertConstraintDoesNotExist( constraint ); + // It is not allowed to create uniqueness constraints on indexed label/property pairs + assertIndexDoesNotExist( SchemaKernelException.OperationContext.CONSTRAINT_CREATION, descriptor, Optional.empty() ); + + // Create constraints + indexBackedConstraintCreate( constraint, provider ); + return constraint; + } + + @Override + public ConstraintDescriptor nodeKeyConstraintCreate( LabelSchemaDescriptor descriptor ) throws SchemaKernelException + { + return nodeKeyConstraintCreate( descriptor, config.get( GraphDatabaseSettings.default_schema_provider ) ); + } + + @Override + public ConstraintDescriptor nodeKeyConstraintCreate( LabelSchemaDescriptor descriptor, String provider ) throws SchemaKernelException + { + //Lock + exclusiveSchemaLock( descriptor ); + ktx.assertOpen(); + + //Check data integrity + assertValidDescriptor( descriptor, SchemaKernelException.OperationContext.CONSTRAINT_CREATION ); + NodeKeyConstraintDescriptor constraint = ConstraintDescriptorFactory.nodeKeyForSchema( descriptor ); + assertConstraintDoesNotExist( constraint ); + // It is not allowed to create node key constraints on indexed label/property pairs + assertIndexDoesNotExist( SchemaKernelException.OperationContext.CONSTRAINT_CREATION, descriptor, Optional.empty() ); + + //enforce constraints + try ( NodeLabelIndexCursor nodes = cursors.allocateNodeLabelIndexCursor() ) + { + allStoreHolder.nodeLabelScan( descriptor.getLabelId(), nodes ); + 
constraintSemantics.validateNodeKeyConstraint( nodes, nodeCursor, propertyCursor, descriptor ); + } + + //create constraint + indexBackedConstraintCreate( constraint, provider ); + return constraint; + } + + @Override + public ConstraintDescriptor nodePropertyExistenceConstraintCreate( LabelSchemaDescriptor descriptor ) + throws SchemaKernelException + { + //Lock + exclusiveSchemaLock( descriptor ); + ktx.assertOpen(); + + //verify data integrity + assertValidDescriptor( descriptor, SchemaKernelException.OperationContext.CONSTRAINT_CREATION ); + ConstraintDescriptor constraint = ConstraintDescriptorFactory.existsForSchema( descriptor ); + assertConstraintDoesNotExist( constraint ); + + //enforce constraints + try ( NodeLabelIndexCursor nodes = cursors.allocateNodeLabelIndexCursor() ) + { + allStoreHolder.nodeLabelScan( descriptor.getLabelId(), nodes ); + constraintSemantics + .validateNodePropertyExistenceConstraint( nodes, nodeCursor, propertyCursor, descriptor ); + } + + //create constraint + ktx.txState().constraintDoAdd( constraint ); + return constraint; + } + + @Override + public ConstraintDescriptor relationshipPropertyExistenceConstraintCreate( RelationTypeSchemaDescriptor descriptor ) + throws SchemaKernelException + { + //Lock + exclusiveSchemaLock( descriptor ); + ktx.assertOpen(); + + //verify data integrity + assertValidDescriptor( descriptor, SchemaKernelException.OperationContext.CONSTRAINT_CREATION ); + ConstraintDescriptor constraint = ConstraintDescriptorFactory.existsForSchema( descriptor ); + assertConstraintDoesNotExist( constraint ); + + //enforce constraints + allStoreHolder.relationshipTypeScan( descriptor.getRelTypeId(), relationshipCursor ); + constraintSemantics + .validateRelationshipPropertyExistenceConstraint( relationshipCursor, propertyCursor, descriptor ); + + //Create + ktx.txState().constraintDoAdd( constraint ); + return constraint; + + } + + @Override + public String relationshipExplicitIndexSetConfiguration( String indexName, String key, String value ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + return allStoreHolder.explicitIndexStore().setRelationshipIndexConfiguration( indexName, key, value ); + } + + @Override + public String relationshipExplicitIndexRemoveConfiguration( String indexName, String key ) + throws ExplicitIndexNotFoundKernelException + { + ktx.assertOpen(); + return allStoreHolder.explicitIndexStore().removeRelationshipIndexConfiguration( indexName, key ); + } + + @Override + public void constraintDrop( ConstraintDescriptor descriptor ) throws SchemaKernelException + { + //Lock + SchemaDescriptor schema = descriptor.schema(); + exclusiveOptimisticLock( schema.keyType(), schema.keyId() ); + ktx.assertOpen(); + + //verify data integrity + try + { + assertConstraintExists( descriptor ); + } + catch ( NoSuchConstraintException e ) + { + throw new DropConstraintFailureException( descriptor, e ); + } + + //Drop it like it's hot + ktx.txState().constraintDoDrop( descriptor ); + } + + private void assertIndexDoesNotExist( SchemaKernelException.OperationContext context, SchemaDescriptor descriptor, Optional name ) + throws AlreadyIndexedException, AlreadyConstrainedException + { + IndexDescriptor existingIndex = allStoreHolder.indexGetForSchema( descriptor ); + if ( existingIndex == null && name.isPresent() ) + { + IndexReference indexReference = allStoreHolder.indexGetForName( name.get() ); + if ( indexReference != IndexReference.NO_INDEX ) + { + existingIndex = (IndexDescriptor) indexReference; + } + } + if ( 
existingIndex != null ) + { + // OK so we found a matching constraint index. We check whether or not it has an owner + // because this may have been a left-over constraint index from a previously failed + // constraint creation, due to crash or similar, hence the missing owner. + if ( existingIndex.type() == UNIQUE ) + { + if ( context != CONSTRAINT_CREATION || constraintIndexHasOwner( existingIndex ) ) + { + throw new AlreadyConstrainedException( ConstraintDescriptorFactory.uniqueForSchema( descriptor ), + context, new SilentTokenNameLookup( token ) ); + } + } + else + { + throw new AlreadyIndexedException( descriptor, context ); + } + } + } + + private void exclusiveOptimisticLock( ResourceType resource, long resourceId ) + { + ktx.statementLocks().optimistic().acquireExclusive( ktx.lockTracer(), resource, resourceId ); + } + + private void acquireExclusiveNodeLock( long node ) + { + if ( !ktx.hasTxStateWithChanges() || !ktx.txState().nodeIsAddedInThisTx( node ) ) + { + ktx.statementLocks().optimistic().acquireExclusive( ktx.lockTracer(), ResourceTypes.NODE, node ); + } + } + + private void acquireExclusiveRelationshipLock( long relationshipId ) + { + if ( !ktx.hasTxStateWithChanges() || !ktx.txState().relationshipIsAddedInThisTx( relationshipId ) ) + { + ktx.statementLocks().optimistic() + .acquireExclusive( ktx.lockTracer(), ResourceTypes.RELATIONSHIP, relationshipId ); + } + } + + private void sharedSchemaLock( ResourceType type, int tokenId ) + { + ktx.statementLocks().optimistic().acquireShared( ktx.lockTracer(), type, tokenId ); + } + + private void exclusiveSchemaLock( SchemaDescriptor schema ) + { + long[] lockingIds = schemaTokenLockingIds( schema ); + ktx.statementLocks().optimistic().acquireExclusive( ktx.lockTracer(), schema.keyType(), lockingIds ); + } + + private void lockRelationshipNodes( long startNodeId, long endNodeId ) + { + // Order the locks to lower the risk of deadlocks with other threads creating/deleting rels concurrently + acquireExclusiveNodeLock( min( startNodeId, endNodeId ) ); + if ( startNodeId != endNodeId ) + { + acquireExclusiveNodeLock( max( startNodeId, endNodeId ) ); + } + } + + private static boolean propertyHasChanged( Value lhs, Value rhs ) + { + //It is not enough to check equality here since by our equality semantics `int == tofloat(int)` is `true` + //so by only checking for equality users cannot change type of property without also "changing" the value. + //Hence the extra type check here. 
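+        // For example, by value semantics an integer 1 and a double 1.0 compare as equal, but their
+        // Value classes differ, so the getClass() check below still reports the property as changed
+        // when only its storage type changes.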
+ return lhs.getClass() != rhs.getClass() || !lhs.equals( rhs ); + } + + private void assertNodeExists( long sourceNode ) throws EntityNotFoundException + { + if ( !allStoreHolder.nodeExists( sourceNode ) ) + { + throw new EntityNotFoundException( NODE, sourceNode ); + } + } + + private boolean constraintIndexHasOwner( IndexDescriptor descriptor ) + { + return allStoreHolder.indexGetOwningUniquenessConstraintId( descriptor ) != null; + } + + private void assertConstraintDoesNotExist( ConstraintDescriptor constraint ) + throws AlreadyConstrainedException + { + if ( allStoreHolder.constraintExists( constraint ) ) + { + throw new AlreadyConstrainedException( constraint, + SchemaKernelException.OperationContext.CONSTRAINT_CREATION, + new SilentTokenNameLookup( token ) ); + } + } + + public Locks locks() + { + return allStoreHolder; + } + + private void assertConstraintExists( ConstraintDescriptor constraint ) + throws NoSuchConstraintException + { + if ( !allStoreHolder.constraintExists( constraint ) ) + { + throw new NoSuchConstraintException( constraint ); + } + } + + private static void assertValidDescriptor( SchemaDescriptor descriptor, SchemaKernelException.OperationContext context ) + throws RepeatedPropertyInCompositeSchemaException + { + int numUnique = Arrays.stream( descriptor.getPropertyIds() ).distinct().toArray().length; + if ( numUnique != descriptor.getPropertyIds().length ) + { + throw new RepeatedPropertyInCompositeSchemaException( descriptor, context ); + } + } + + private void indexBackedConstraintCreate( IndexBackedConstraintDescriptor constraint, String provider ) + throws CreateConstraintFailureException + { + SchemaDescriptor descriptor = constraint.schema(); + try + { + if ( ktx.hasTxStateWithChanges() && + ktx.txState().indexDoUnRemove( constraint.ownedIndexDescriptor() ) ) // ..., DROP, *CREATE* + { // creation is undoing a drop + if ( !ktx.txState().constraintDoUnRemove( constraint ) ) // CREATE, ..., DROP, *CREATE* + { // ... the drop we are undoing did itself undo a prior create... + ktx.txState().constraintDoAdd( + constraint, ktx.txState().indexCreatedForConstraint( constraint ) ); + } + } + else // *CREATE* + { // create from scratch + Iterator it = allStoreHolder.constraintsGetForSchema( descriptor ); + while ( it.hasNext() ) + { + if ( it.next().equals( constraint ) ) + { + return; + } + } + long indexId = constraintIndexCreator.createUniquenessConstraintIndex( ktx, descriptor, provider ); + if ( !allStoreHolder.constraintExists( constraint ) ) + { + // This looks weird, but since we release the label lock while awaiting population of the index + // backing this constraint there can be someone else getting ahead of us, creating this exact + // constraint + // before we do, so now getting out here under the lock we must check again and if it exists + // we must at this point consider this an idempotent operation because we verified earlier + // that it didn't exist and went on to create it. 
+ ktx.txState().constraintDoAdd( constraint, indexId ); + } + } + } + catch ( UniquePropertyValueValidationException | TransactionFailureException | AlreadyConstrainedException e ) + { + throw new CreateConstraintFailureException( constraint, e ); + } + } + + private static void assertValidIndex( IndexReference index ) throws NoSuchIndexException + { + if ( index == IndexReference.NO_INDEX ) + { + throw new NoSuchIndexException( index.schema() ); + } + } +} diff --git a/external-properties/src/main/scala/cn/pandadb/externalprops/ExternalPropertyStore.scala b/external-properties/src/main/scala/cn/pandadb/externalprops/ExternalPropertyStore.scala new file mode 100644 index 00000000..37bc79fc --- /dev/null +++ b/external-properties/src/main/scala/cn/pandadb/externalprops/ExternalPropertyStore.scala @@ -0,0 +1,369 @@ +package cn.pandadb.externalprops + +import cn.pandadb.util.{ClosableModuleComponent, Configuration, PandaException} +import org.neo4j.cypher.internal.runtime.interpreted.NFPredicate +import org.neo4j.values.storable.{Value, Values} +import org.neo4j.values.virtual.{NodeValue, VirtualValues} + +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + +/** + * Created by bluejoe on 2019/10/7. + */ +trait ExternalPropertyStoreFactory { + def create(conf: Configuration): CustomPropertyNodeStore; +} + +trait CustomPropertyNodeReader { + def filterNodes(expr: NFPredicate): Iterable[Long]; + + def getNodesByLabel(label: String): Iterable[Long]; + + def getNodeBylabelAndFilter(label: String, expr: NFPredicate): Iterable[Long]; + + @deprecated + def getNodeById(id: Long): Option[NodeWithProperties]; +} + +trait CustomPropertyNodeStore extends CustomPropertyNodeReader with ClosableModuleComponent { + def beginWriteTransaction(): PropertyWriteTransaction; +} + +trait PropertyWriter { + def deleteNode(nodeId: Long); + + def addNode(nodeId: Long); + + def addProperty(nodeId: Long, key: String, value: Value): Unit; + + def removeProperty(nodeId: Long, key: String); + + def updateProperty(nodeId: Long, key: String, value: Value): Unit; + + def addLabel(nodeId: Long, label: String): Unit; + + def removeLabel(nodeId: Long, label: String): Unit; +} + +trait PropertyReaderWithinTransaction { + def getNodeLabels(nodeId: Long): Array[String]; + + def getPropertyValue(nodeId: Long, key: String): Option[Value]; +} + +trait PropertyWriteTransaction extends PropertyWriter with PropertyReaderWithinTransaction { + @throws[FailedToCommitTransaction] + def commit(): mutable.Undoable; + + @throws[FailedToRollbackTransaction] + def rollback(): Unit; + + def close(): Unit; +} + +class FailedToCommitTransaction(tx: PropertyWriteTransaction, cause: Throwable) + extends PandaException("failed to commit transaction: $tx") { + +} + +class FailedToRollbackTransaction(tx: PropertyWriteTransaction, cause: Throwable) + extends PandaException("failed to roll back transaction: $tx") { + +} + +case class NodeWithProperties(id: Long, props: Map[String, Value], labels: Iterable[String]) { + def toNeo4jNodeValue(): NodeValue = { + VirtualValues.nodeValue(id, + Values.stringArray(labels.toArray: _*), + VirtualValues.map(props.keys.toArray, props.values.toArray)) + } + + def mutable(): MutableNodeWithProperties = { + val m = MutableNodeWithProperties(id); + m.props ++= props; + m.labels ++= labels; + m; + } +} + +case class MutableNodeWithProperties(id: Long) { + val props = mutable.Map[String, Value](); + val labels = ArrayBuffer[String](); +} + +class BufferedExternalPropertyWriteTransaction( + 
nodeReader: CustomPropertyNodeReader,
+    commitPerformer: GroupedOpVisitor,
+    undoPerformer: GroupedOpVisitor)
+  extends PropertyWriteTransaction {
+  val bufferedOps = ArrayBuffer[BufferedPropertyOp]();
+  val oldState = mutable.Map[Long, MutableNodeWithProperties]();
+  val newState = mutable.Map[Long, MutableNodeWithProperties]();
+  private var isCommited = false
+
+  override def deleteNode(nodeId: Long): Unit = {
+    getPopulatedNode(nodeId)
+    //bufferedOps += BufferedDeleteNodeOp(nodeId)
+    newState.remove(nodeId)
+  }
+
+  // populate the node state lazily, on first access within this transaction
+  private def getPopulatedNode(nodeId: Long): MutableNodeWithProperties = {
+    if (isNodeExitInNewState(nodeId)) newState.get(nodeId).get
+    else {
+      if (!isNodeExitInOldState(nodeId)) {
+        val state = nodeReader.getNodeById(nodeId).get // load from the underlying store; fails if no such node exists
+        oldState += nodeId -> state.mutable()
+        newState += nodeId -> state.mutable()
+        newState.get(nodeId).get
+      }
+      else null // TODO: the node was deleted in this transaction, should throw an exception
+    }
+  }
+
+  private def isNodeExitInNewState(nodeId: Long): Boolean = {
+    newState.contains(nodeId)
+  }
+
+  private def isNodeExitInOldState(nodeId: Long): Boolean = {
+    oldState.contains(nodeId)
+  }
+
+  override def addNode(nodeId: Long): Unit = {
+    //bufferedOps += BufferedAddNodeOp(nodeId)
+    if (isNodeExitInNewState(nodeId)) null // TODO: the node already exists, should throw an exception
+    else newState += nodeId -> MutableNodeWithProperties(nodeId)
+  }
+
+  override def addProperty(nodeId: Long, key: String, value: Value): Unit = {
+    //bufferedOps += BufferedAddPropertyOp(nodeId, key, value)
+    val state = getPopulatedNode(nodeId)
+    state.props += (key -> value);
+    newState += nodeId -> state
+  }
+
+  override def removeProperty(nodeId: Long, key: String): Unit = {
+    //bufferedOps += BufferedRemovePropertyOp(nodeId, key)
+    val state = getPopulatedNode(nodeId)
+    state.props -= key;
+    newState += nodeId -> state
+  }
+
+  override def updateProperty(nodeId: Long, key: String, value: Value): Unit = {
+    //bufferedOps += BufferedUpdatePropertyOp(nodeId, key, value)
+    val state = getPopulatedNode(nodeId)
+    state.props += (key -> value);
+    newState += nodeId -> state
+  }
+
+  override def addLabel(nodeId: Long, label: String): Unit = {
+    //bufferedOps += BufferedAddLabelOp(nodeId, label)
+    val state = getPopulatedNode(nodeId)
+    state.labels += label
+    newState += nodeId -> state
+  }
+
+  override def removeLabel(nodeId: Long, label: String): Unit = {
+    //bufferedOps += BufferedRemoveLabelOp(nodeId, label)
+    //getPopulatedNode(nodeId).labels -= label
+    val state = getPopulatedNode(nodeId)
+    state.labels -= label
+    newState += nodeId -> state
+  }
+
+  def getNodeLabels(nodeId: Long): Array[String] = {
+    getPopulatedNode(nodeId).labels.toArray
+  }
+
+  def getPropertyValue(nodeId: Long, key: String): Option[Value] = {
+    getPopulatedNode(nodeId).props.get(key)
+  }
+
+  @throws[FailedToCommitTransaction]
+  def commit(): mutable.Undoable = {
+    val ops: GroupedOps = GroupedOps(bufferedOps.toArray)
+    ops.newState = this.newState
+    ops.oldState = this.oldState
+    if (!isCommited) {
+      doPerformerWork(ops, commitPerformer)
+      isCommited = true
+    }
+    else {
+      Unit // TODO: already committed, committing twice should throw an exception
+    }
+    new mutable.Undoable() {
+      def undo(): Unit = {
+        if (isCommited) {
+          doPerformerWork(ops, undoPerformer)
+          isCommited = false
+        }
+      }
+    }
+  }
+
+  @throws[FailedToRollbackTransaction]
+  def rollback(): Unit = {
+  }
+
+  def close(): Unit = {
+    bufferedOps.clear()
+    newState.clear()
+    oldState.clear()
+  }
+
+  private def doPerformerWork(ops: GroupedOps, performer: GroupedOpVisitor): Unit = {
+    performer.start(ops)
+    performer.work()
+    performer.end(ops)
+  }
+}
+
+/**
+  * Buffer-based implementation of PropertyWriteTransaction; this is a template class which should be derived.
+  */
+
+case class GroupedOps(ops: Array[BufferedPropertyOp]) {
+  // buffered commands, grouped and combined per node
+  val addedNodes = mutable.Map[Long, GroupedAddNodeOp]();
+  val updatedNodes = mutable.Map[Long, GroupedUpdateNodeOp]();
+  val deleteNodes = ArrayBuffer[GroupedDeleteNodeOp]();
+  var oldState = mutable.Map[Long, MutableNodeWithProperties]();
+  var newState = mutable.Map[Long, MutableNodeWithProperties]();
+
+  ops.foreach {
+    _ match {
+      case BufferedAddNodeOp(nodeId: Long) => addedNodes += nodeId -> GroupedAddNodeOp(nodeId)
+      case BufferedDeleteNodeOp(nodeId: Long) =>
+        addedNodes -= nodeId
+        updatedNodes -= nodeId
+        deleteNodes += GroupedDeleteNodeOp(nodeId)
+      case BufferedUpdatePropertyOp(nodeId: Long, key: String, value: Value) =>
+        if (addedNodes.isDefinedAt(nodeId)) {
+          addedNodes(nodeId).addedProps += key -> value
+        }
+        if (updatedNodes.isDefinedAt(nodeId)) {
+          updatedNodes(nodeId).updatedProps += key -> value
+        }
+      case BufferedRemovePropertyOp(nodeId: Long, key: String) =>
+        if (addedNodes.isDefinedAt(nodeId)) {
+          addedNodes(nodeId).addedProps -= key
+        }
+        if (updatedNodes.isDefinedAt(nodeId)) {
+          updatedNodes(nodeId).updatedProps -= key
+        }
+      case BufferedAddPropertyOp(nodeId: Long, key: String, value: Value) =>
+        if (addedNodes.isDefinedAt(nodeId)) {
+          addedNodes(nodeId).addedProps += key -> value
+        }
+        if (updatedNodes.isDefinedAt(nodeId)) {
+          updatedNodes(nodeId).addedProps += key -> value
+        }
+      case BufferedAddLabelOp(nodeId: Long, label: String) =>
+        if (addedNodes.isDefinedAt(nodeId)) {
+          addedNodes(nodeId).addedLabels += label
+        }
+        if (updatedNodes.isDefinedAt(nodeId)) {
+          updatedNodes(nodeId).addedLabels += label
+        }
+      case BufferedRemoveLabelOp(nodeId: Long, label: String) =>
+        if (addedNodes.isDefinedAt(nodeId)) {
+          addedNodes(nodeId).addedLabels -= label
+        }
+        if (updatedNodes.isDefinedAt(nodeId)) {
+          updatedNodes(nodeId).removedLabels += label
+        }
+    }
+  }
+
+  def accepts(visitor: GroupedOpVisitor): Unit = {
+    addedNodes.values.foreach(_.accepts(visitor))
+    updatedNodes.values.foreach(_.accepts(visitor))
+    deleteNodes.foreach(_.accepts(visitor))
+  }
+}
+
+trait GroupedOpVisitor {
+
+  def start(ops: GroupedOps);
+  def work();
+  def end(ops: GroupedOps);
+
+  def visitAddNode(nodeId: Long, props: Map[String, Value], labels: Array[String]);
+
+  def visitDeleteNode(nodeId: Long);
+
+  def visitUpdateNode(nodeId: Long, addedProps: Map[String, Value], updateProps: Map[String, Value], removeProps: Array[String],
+                      addedLabels: Array[String], removedLabels: Array[String]);
+}
+
+/**
+  * buffered operation within a prepared transaction
+  */
+trait BufferedPropertyOp {
+
+}
+
+case class BufferedDeleteNodeOp(nodeId: Long) extends BufferedPropertyOp {
+
+}
+
+case class BufferedAddNodeOp(nodeId: Long) extends BufferedPropertyOp {
+
+}
+
+case class BufferedUpdatePropertyOp(nodeId: Long, key: String, value: Value) extends BufferedPropertyOp {
+
+}
+
+case class BufferedRemovePropertyOp(nodeId: Long, key: String) extends BufferedPropertyOp {
+
+}
+
+case class BufferedAddPropertyOp(nodeId: Long, key: String, value: Value) extends BufferedPropertyOp {
+
+}
+
+case class BufferedAddLabelOp(nodeId: Long, label: String) extends BufferedPropertyOp {
+
+}
+
+case class
BufferedRemoveLabelOp(nodeId: Long, label: String) extends BufferedPropertyOp { + +} + +/** + * grouped operation to be committed + */ +trait GroupedOp { + def accepts(visitor: GroupedOpVisitor): Unit; +} + +case class GroupedAddNodeOp(nodeId: Long) extends GroupedOp { + val addedProps = mutable.Map[String, Value](); + val addedLabels = mutable.Set[String](); + + def accepts(visitor: GroupedOpVisitor): Unit = { + visitor.visitAddNode(nodeId, addedProps.toMap, addedLabels.toArray) + } +} + +case class GroupedUpdateNodeOp(nodeId: Long) extends GroupedOp { + val addedProps = mutable.Map[String, Value](); + val updatedProps = mutable.Map[String, Value](); + val removedProps = mutable.Set[String](); + val addedLabels = mutable.Set[String](); + val removedLabels = mutable.Set[String](); + + def accepts(visitor: GroupedOpVisitor): Unit = { + visitor.visitUpdateNode(nodeId, addedProps.toMap, updatedProps.toMap, + removedProps.toArray, addedLabels.toArray, removedLabels.toArray) + } +} + +case class GroupedDeleteNodeOp(nodeId: Long) extends GroupedOp { + def accepts(visitor: GroupedOpVisitor): Unit = { + visitor.visitDeleteNode(nodeId) + } +} \ No newline at end of file diff --git a/external-properties/src/main/scala/cn/pandadb/externalprops/InElasticSearchPropertyNodeStore.scala b/external-properties/src/main/scala/cn/pandadb/externalprops/InElasticSearchPropertyNodeStore.scala new file mode 100644 index 00000000..2fa34e86 --- /dev/null +++ b/external-properties/src/main/scala/cn/pandadb/externalprops/InElasticSearchPropertyNodeStore.scala @@ -0,0 +1,553 @@ +package cn.pandadb.externalprops + +import java.util + +import scala.collection.JavaConversions._ +import scala.collection.{AbstractIterator, mutable} +import scala.collection.mutable.ArrayBuffer +import org.neo4j.cypher.internal.runtime.interpreted.{NFLessThan, NFPredicate, _} +import org.neo4j.values.storable._ +import cn.pandadb.util.{Configuration, PandaModuleContext} +import com.alibaba.fastjson.JSONObject +import org.apache.http.HttpHost +import org.elasticsearch.client.{RequestOptions, RestClient, RestHighLevelClient} +import org.elasticsearch.action.admin.indices.create.{CreateIndexRequest, CreateIndexResponse} +import org.elasticsearch.action.admin.indices.get.GetIndexRequest +import org.elasticsearch.action.index.{IndexRequest, IndexResponse} +import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory, XContentType} +import org.elasticsearch.action.get.GetRequest +import org.elasticsearch.action.update.{UpdateRequest, UpdateResponse} +import org.elasticsearch.action.delete.{DeleteRequest, DeleteResponse} +import org.elasticsearch.common.Strings +import org.elasticsearch.search.fetch.subphase.FetchSourceContext +import org.elasticsearch.action.search.{ClearScrollRequest, SearchRequest, SearchScrollRequest} +import org.elasticsearch.index.query.{QueryBuilder, QueryBuilders} +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.index.reindex.{BulkByScrollResponse, DeleteByQueryRequest} +import org.elasticsearch.action.support.WriteRequest +import org.elasticsearch.common.unit.{TimeValue => EsTimeValue} +import org.elasticsearch.search.{Scroll, SearchHit} + + +class InElasticSearchPropertyNodeStoreFactory extends ExternalPropertyStoreFactory { + override def create(conf: Configuration): CustomPropertyNodeStore = { + + import cn.pandadb.util.ConfigUtils._ + + val host = conf.getRequiredValueAsString("external.properties.store.es.host") + val port = 
conf.getRequiredValueAsInt("external.properties.store.es.port") + val schema = conf.getRequiredValueAsString("external.properties.store.es.schema") + val scrollSize = conf.getRequiredValueAsInt("external.properties.store.es.scroll.size") + val scrollContainTime = conf.getRequiredValueAsInt("external.properties.store.es.scroll.time.minutes") + val indexName = conf.getRequiredValueAsString("external.properties.store.es.index") + val typeName = conf.getRequiredValueAsString("external.properties.store.es.type") + new InElasticSearchPropertyNodeStore(host, port, indexName, typeName, schema, scrollSize, scrollContainTime) + } +} + +object EsUtil { + val idName = "id" + val labelName = "labels" + val tik = "id,labels,_version_" + val arrayName = "Array" + val dateType = "time" + + def getValueFromArray(value: Array[AnyRef]): Value = { + val typeObj = value.head + typeObj match { + case s1: java.lang.String => + val strArr = value.map(_.toString).toArray + val result = Values.stringArray(strArr: _*) + result + case s2: java.lang.Boolean => + Values.booleanArray(value.map(_.asInstanceOf[Boolean]).toArray) + case s3: java.lang.Long => + Values.longArray(value.map(_.asInstanceOf[Long]).toArray) + case s4: java.lang.Byte => + Values.byteArray(value.map(_.asInstanceOf[Byte]).toArray) + case s5: java.lang.Short => + Values.shortArray(value.map(_.asInstanceOf[Short]).toArray) + case s6: java.lang.Integer => + Values.intArray(value.map(_.asInstanceOf[Int]).toArray) + case s7: java.lang.Double => + Values.doubleArray(value.map(_.asInstanceOf[Double]).toArray) + case s8: java.lang.Float => + Values.floatArray(value.map(_.asInstanceOf[Float]).toArray) + case _ => null + } + } + + def neo4jValueToScala(value: Value): Any = { + value match { + case v: IntegralValue => v.asInstanceOf[IntegralValue].longValue() + case v: IntegralArray => + v.asInstanceOf[IntegralArray].iterator().map(v2 => v2.asInstanceOf[IntegralValue].longValue()).toArray + case v: FloatingPointValue => v.asInstanceOf[FloatingPointValue].doubleValue() + case v: FloatingPointArray => + v.asInstanceOf[FloatingPointArray].iterator().map(v2 => v2.asInstanceOf[FloatingPointValue].doubleValue()).toArray + case v: TextValue => v.asInstanceOf[TextValue].stringValue() + case v: TextArray => + v.asInstanceOf[TextArray].iterator().map(v2 => v2.asInstanceOf[TextValue].stringValue()).toArray + case v: BooleanValue => v.asInstanceOf[BooleanValue].booleanValue() + case v: BooleanArray => + v.asInstanceOf[BooleanArray].iterator().map(v2 => v2.asInstanceOf[BooleanValue].booleanValue()).toArray + case v => v.asObject() + } + } + + def sourceMapToNodeWithProperties(doc: Map[String, Object]): NodeWithProperties = { + val props = mutable.Map[String, Value]() + var id: Long = -1 + if (doc.contains(idName)) { + id = doc.get(idName).get.asInstanceOf[Int].toLong + } + val labels = ArrayBuffer[String]() + if (doc.contains(labelName)) doc.get(labelName).get.asInstanceOf[util.ArrayList[String]].foreach(u => labels += u) + doc.map(field => + if (!field._1.equals(idName) && !field._1.equals(labelName) ) { + if (field._2.isInstanceOf[util.ArrayList[Object]]) { + props(field._1) = getValueFromArray(field._2.asInstanceOf[util.ArrayList[Object]].toArray()) + } + else props(field._1) = Values.of(field._2) + } + ) + + NodeWithProperties(id.toString.toLong, props.toMap, labels) + } + + def createClient(host: String, port: Int, indexName: String, typeName: String, + schema: String = "http") : RestHighLevelClient = { + val httpHost = new HttpHost(host, port, schema) + val builder = 
RestClient.builder(httpHost) + val client = new RestHighLevelClient(builder) + if (!indexExists(client, indexName)) { + val res = createIndex(client, indexName, typeName) + if (!res) throw new Exception("InElasticSearchPropertyNodeStore: create index failed!") + } + client + } + + private def indexExists(client: RestHighLevelClient, indexName: String): Boolean = { + val request = new GetIndexRequest() + request.indices(indexName) + client.indices.exists(request, RequestOptions.DEFAULT) + } + + private def createIndex(client: RestHighLevelClient, indexName: String, typeName: String): Boolean = { + val indexRequest: CreateIndexRequest = new CreateIndexRequest(indexName) + indexRequest.mapping(typeName, "{\"_all\":{\"type\":\"text\"}}", XContentType.JSON) + val indexResponse: CreateIndexResponse = client.indices().create(indexRequest, RequestOptions.DEFAULT) + indexResponse.isAcknowledged + } + + def addData(client: RestHighLevelClient, indexName: String, typeName: String, id: String, builder: XContentBuilder): String = { + val indexRequest: IndexRequest = new IndexRequest(indexName, typeName, id) + indexRequest.source(builder) + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) + val indexResponse: IndexResponse = client.index(indexRequest) + indexResponse.getId + } + + def updateData(client: RestHighLevelClient, indexName: String, typeName: String, id: String, data: JSONObject): String = { + val request = new UpdateRequest(indexName, typeName, id) + request.doc(data.toString, XContentType.JSON) + request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) + val response: UpdateResponse = client.update(request, RequestOptions.DEFAULT) + response.toString + } + + def deleteData(client: RestHighLevelClient, indexName: String, typeName: String, id: String): String = { + val request: DeleteRequest = new DeleteRequest(indexName, typeName, id) + request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) + val response: DeleteResponse = client.delete(request, RequestOptions.DEFAULT) + response.toString + } + + def getData(client: RestHighLevelClient, indexName: String, typeName: String, id: String): mutable.Map[String, Object] = { + val request: GetRequest = new GetRequest(indexName, typeName, id) + val includes = Strings.EMPTY_ARRAY + val excludes = Strings.EMPTY_ARRAY + val fetchSourceContext = new FetchSourceContext(true, includes, excludes) + request.fetchSourceContext(fetchSourceContext) + val response = client.get(request, RequestOptions.DEFAULT) + response.getSource + } + + def getAllSize(client: RestHighLevelClient, indexName: String, typeName: String): Long = { + val searchRequest: SearchRequest = new SearchRequest(); + searchRequest.indices(indexName) + searchRequest.types(typeName) + val searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchAllQuery()); + searchSourceBuilder.fetchSource(false) + searchRequest.source(searchSourceBuilder); + val searchResponse = client.search(searchRequest, RequestOptions.DEFAULT) + searchResponse.getHits.totalHits + } + + def clearAllData(client: RestHighLevelClient, indexName: String, typeName: String): Long = { + val request: DeleteByQueryRequest = new DeleteByQueryRequest() + request.indices(indexName) + request.types(typeName) + request.setQuery(QueryBuilders.matchAllQuery()) + request.setRefresh(true) + val response: BulkByScrollResponse = client.deleteByQuery(request, RequestOptions.DEFAULT) + response.getDeleted + } + + def searchWithProperties(client: RestHighLevelClient, 
indexName: String, typeName: String, + queryBuilder: QueryBuilder, scrollSize: Int, scrollContainTimeMinutes: Int): Iterable[NodeWithProperties] = { + (new SearchResultsIterator(client, indexName, typeName, queryBuilder, scrollSize, scrollContainTimeMinutes)).toIterable + } + + def searchNodeId(client: RestHighLevelClient, indexName: String, typeName: String, + queryBuilder: QueryBuilder, scrollSize: Int, scrollContainTimeMinutes: Int): Iterable[Long] = { + (new SearchNodeIdResultsIterator(client, indexName, typeName, queryBuilder, scrollSize, scrollContainTimeMinutes)).toIterable + } + + class SearchResultsIterator(client: RestHighLevelClient, indexName: String, typeName: String, queryBuilder: QueryBuilder, + scrollSize: Int, scrollContainTimeMinutes: Int) extends AbstractIterator[NodeWithProperties] { + private val searchRequest = new SearchRequest() + searchRequest.indices(indexName) + searchRequest.types(typeName) + private val searchSourceBuilder = new SearchSourceBuilder() + private val scroll = new Scroll(EsTimeValue.timeValueMinutes(scrollContainTimeMinutes)) + searchSourceBuilder.query(queryBuilder) + searchSourceBuilder.size(scrollSize) + searchRequest.source(searchSourceBuilder) + searchRequest.scroll(scroll) + private var searchResponse = client.search(searchRequest, RequestOptions.DEFAULT) + private var scrollId = searchResponse.getScrollId + private var searchHits = searchResponse.getHits.getHits + private var lastHitsSize = searchHits.size + private var hitsIterator = searchHits.toIterator + + private def doScroll(): Boolean = { + if (lastHitsSize > 0) { + val scrollRequest = new SearchScrollRequest(scrollId) + scrollRequest.scroll(scroll) + searchResponse = client.scroll(scrollRequest, RequestOptions.DEFAULT) + scrollId = searchResponse.getScrollId + searchHits = searchResponse.getHits.getHits + lastHitsSize = searchHits.size + hitsIterator = searchHits.toIterator + lastHitsSize > 0 + } + else { + val clearScrollRequest = new ClearScrollRequest + clearScrollRequest.addScrollId(scrollId) + val clearScrollResponse = client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT) + clearScrollResponse.isSucceeded + false + } + } + + override def hasNext: Boolean = hitsIterator.hasNext || doScroll() + + override def next(): NodeWithProperties = { + val h = hitsIterator.next() + EsUtil.sourceMapToNodeWithProperties(h.getSourceAsMap.toMap) + } + } + + class SearchNodeIdResultsIterator(client: RestHighLevelClient, indexName: String, typeName: String, queryBuilder: QueryBuilder, + scrollSize: Int, scrollContainTimeMinutes: Int) extends AbstractIterator[Long] { + private val searchRequest = new SearchRequest() + searchRequest.indices(indexName) + searchRequest.types(typeName) + private val searchSourceBuilder = new SearchSourceBuilder() + private val scroll = new Scroll(EsTimeValue.timeValueMinutes(scrollContainTimeMinutes)) + searchSourceBuilder.query(queryBuilder) + val fields = Array[String](idName) + searchSourceBuilder.fetchSource(fields, null) + searchSourceBuilder.size(scrollSize) + searchRequest.source(searchSourceBuilder) + searchRequest.scroll(scroll) + private var searchResponse = client.search(searchRequest, RequestOptions.DEFAULT) + private var scrollId = searchResponse.getScrollId + private var searchHits: Array[SearchHit] = searchResponse.getHits.getHits + private var lastHitsSize = searchHits.size + private var hitsIterator = searchHits.toIterator + + private def doScroll(): Boolean = { + if (lastHitsSize > 0) { + val scrollRequest = new SearchScrollRequest(scrollId) 
+ scrollRequest.scroll(scroll) + searchResponse = client.scroll(scrollRequest, RequestOptions.DEFAULT) + scrollId = searchResponse.getScrollId + searchHits = searchResponse.getHits.getHits + lastHitsSize = searchHits.size + hitsIterator = searchHits.toIterator + lastHitsSize > 0 + } + else { + val clearScrollRequest = new ClearScrollRequest + clearScrollRequest.addScrollId(scrollId) + val clearScrollResponse = client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT) + clearScrollResponse.isSucceeded + false + } + } + + override def hasNext: Boolean = hitsIterator.hasNext || doScroll() + + override def next(): Long = { + val h = hitsIterator.next() + val doc = h.getSourceAsMap.toMap + var id: Long = -1 + if (doc.contains(idName)) { + id = doc.get(idName).get.asInstanceOf[Int].toLong + } + id + } + } + +} + +class InElasticSearchPropertyNodeStore(host: String, port: Int, indexName: String, typeName: String, + schema: String = "http", scrollSize: Int = 100, scrollContainTimeMinutes: Int = 10) extends CustomPropertyNodeStore { + //initialize elasticsearch connection + val esClient = EsUtil.createClient(host, port, indexName, typeName, schema) + + def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { + docsToBeDeleted.foreach(node => EsUtil.deleteData(esClient, indexName, typeName, node.toString)) + } + + def clearAll(): Unit = { + EsUtil.clearAllData(esClient, indexName, typeName) + } + + def getRecorderSize(): Long = { + EsUtil.getAllSize(esClient, indexName, typeName) + } + + private def predicate2EsQuery(expr: NFPredicate): QueryBuilder = { + expr match { + case expr: NFGreaterThan => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + QueryBuilders.rangeQuery(paramKey).gt(paramValue) + case expr: NFGreaterThanOrEqual => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + QueryBuilders.rangeQuery(paramKey).gte(paramValue) + case expr: NFLessThan => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + QueryBuilders.rangeQuery(paramKey).lt(paramValue) + case expr: NFLessThanOrEqual => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + QueryBuilders.rangeQuery(paramKey).lte(paramValue) + case expr: NFEquals => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + QueryBuilders.termQuery(paramKey, paramValue) + case expr: NFNotEquals => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery(paramKey, paramValue)) + case expr: NFNotNull => + val paramKey = expr.propName + QueryBuilders.existsQuery(paramKey) + case expr: NFIsNull => + val paramKey = expr.propName + QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery(paramKey)) + case expr: NFTrue => + QueryBuilders.matchAllQuery() + case expr: NFFalse => + QueryBuilders.boolQuery().mustNot(QueryBuilders.matchAllQuery()) + case expr: NFStartsWith => + val paramValue = expr.text + val paramKey = expr.propName + QueryBuilders.prefixQuery(paramKey, paramValue) + case expr: NFEndsWith => + val paramValue = expr.text + val paramKey = expr.propName + QueryBuilders.regexpQuery(paramKey + ".keyword", ".*" + paramValue) + case expr: NFHasProperty => + val paramKey = expr.propName + QueryBuilders.existsQuery(paramKey) + case expr: NFContainsWith => + val paramValue = expr.text + val paramKey = expr.propName + if (paramKey.equals(EsUtil.labelName)) 
{ + QueryBuilders.commonTermsQuery(paramKey, paramValue) + } + else { + QueryBuilders.regexpQuery(paramKey + ".keyword", ".*" + paramValue + ".*") + } + case expr: NFRegexp => + val paramValue = expr.text + val paramKey = expr.propName + QueryBuilders.regexpQuery(paramKey, paramValue) + case expr: NFAnd => + val q1 = predicate2EsQuery(expr.a) + val q2 = predicate2EsQuery(expr.b) + QueryBuilders.boolQuery().must(q1).must(q2) + case expr: NFOr => + val q1 = predicate2EsQuery(expr.a) + val q2 = predicate2EsQuery(expr.b) + QueryBuilders.boolQuery().should(q1).should(q2) + case expr: NFNot => + val q1 = predicate2EsQuery(expr.a) + QueryBuilders.boolQuery().mustNot(q1) + } + } + + private def filterNodesWithProperties(expr: NFPredicate): Iterable[NodeWithProperties] = { + val q = predicate2EsQuery(expr) + EsUtil.searchWithProperties(esClient, indexName, typeName, q, scrollSize, scrollContainTimeMinutes) + } + + override def filterNodes(expr: NFPredicate): Iterable[Long] = { + val q = predicate2EsQuery(expr) + EsUtil.searchNodeId(esClient, indexName, typeName, q, scrollSize, scrollContainTimeMinutes) + } + + override def getNodesByLabel(label: String): Iterable[Long] = { + val propName = EsUtil.labelName + filterNodes(NFContainsWith(propName, label)) + } + + override def getNodeBylabelAndFilter(label: String, expr: NFPredicate): Iterable[Long] = { + val propName = EsUtil.labelName + filterNodes(NFAnd(NFContainsWith(propName, label), expr)) + } + + override def getNodeById(id: Long): Option[NodeWithProperties] = { + val propName = EsUtil.idName + filterNodesWithProperties(NFEquals(propName, Values.of(id))).headOption + } + + // for tests + def filterNodesWithProperties(query: QueryBuilder): Iterable[NodeWithProperties] = { + EsUtil.searchWithProperties(esClient, indexName, typeName, query, scrollSize, scrollContainTimeMinutes) + } + + // for tests + def filterNodes(query: QueryBuilder): Iterable[Long] = { + EsUtil.searchNodeId(esClient, indexName, typeName, query, scrollSize, scrollContainTimeMinutes) + } + + // for tests + def getNodesWithPropertiesByLabel(label: String): Iterable[NodeWithProperties] = { + val propName = EsUtil.labelName + filterNodesWithProperties(NFContainsWith(propName, label)) + } + + override def close(ctx: PandaModuleContext): Unit = { + esClient.close() + } + + override def start(ctx: PandaModuleContext): Unit = { + } + + override def beginWriteTransaction(): PropertyWriteTransaction = { + new BufferedExternalPropertyWriteTransaction(this, + new InEsGroupedOpVisitor(true, esClient, indexName, typeName), + new InEsGroupedOpVisitor(false, esClient, indexName, typeName)) + } +} + +class InEsGroupedOpVisitor(isCommit: Boolean, esClient: RestHighLevelClient, indexName: String, typeName: String) + extends GroupedOpVisitor { + + var oldState = mutable.Map[Long, MutableNodeWithProperties]() + var newState = mutable.Map[Long, MutableNodeWithProperties]() + + def addNodes(docsToAdded: Iterable[NodeWithProperties]): Unit = { + docsToAdded.map { x => + val builder = XContentFactory.jsonBuilder + builder.startObject + builder.field(EsUtil.idName, x.id) + builder.field(EsUtil.labelName, x.labels.toArray[String]) + x.props.foreach(y => { + builder.field(y._1, EsUtil.neo4jValueToScala(y._2)) + }) + builder.endObject() + EsUtil.addData(esClient, indexName, typeName, x.id.toString, builder) + } + } + + def getNodeWithPropertiesById(nodeId: Long): NodeWithProperties = { + val dataMap = EsUtil.getData(esClient, indexName, typeName, nodeId.toString) + 
EsUtil.sourceMapToNodeWithProperties(dataMap.toMap) + } + + def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { + docsToBeDeleted.foreach(node => EsUtil.deleteData(esClient, indexName, typeName, node.toString)) + } + + override def start(ops: GroupedOps): Unit = { + this.oldState = ops.oldState + this.newState = ops.newState + } + + override def end(ops: GroupedOps): Unit = { + + } + + override def visitAddNode(nodeId: Long, props: Map[String, Value], labels: Array[String]): Unit = { + if (isCommit) addNodes(Iterable(NodeWithProperties(nodeId, props, labels))) + else visitDeleteNode(nodeId) + } + + override def visitDeleteNode(nodeId: Long): Unit = { + if (isCommit) deleteNodes(Iterable(nodeId)) + else { + val oldNode = oldState.get(nodeId).head + addNodes(Iterable(NodeWithProperties(nodeId, oldNode.props.toMap, oldNode.labels))) + } + } + + def getEsNodeById(id: Long): Map[String, Object] = { + EsUtil.getData(esClient, indexName, typeName, id.toString).toMap + } + + override def visitUpdateNode(nodeId: Long, addedProps: Map[String, Value], + updateProps: Map[String, Value], removeProps: Array[String], + addedLabels: Array[String], removedLabels: Array[String]): Unit = { + + if (isCommit) { + val doc = getEsNodeById(nodeId) + + val node = EsUtil.sourceMapToNodeWithProperties(doc) + val mutiNode = node.mutable() + mutiNode.props ++= addedProps + mutiNode.props ++= updateProps + mutiNode.props --= removeProps + mutiNode.labels ++= addedLabels + mutiNode.labels --= removedLabels + + visitAddNode(nodeId, mutiNode.props.toMap, mutiNode.labels.toArray) + } + + else { + visitDeleteNode(nodeId) + val oldNode = oldState.get(nodeId).head + addNodes(Iterable(NodeWithProperties(nodeId, oldNode.props.toMap, oldNode.labels))) + } + + } + + override def work(): Unit = { + val nodeToAdd = ArrayBuffer[NodeWithProperties]() + val nodeToDelete = ArrayBuffer[Long]() + if (isCommit) { + newState.foreach(tle => nodeToAdd += NodeWithProperties(tle._1, tle._2.props.toMap, tle._2.labels)) + oldState.foreach(tle => { + if (!newState.contains(tle._1)) nodeToDelete += tle._1 + }) + } + else { + oldState.foreach(tle => nodeToAdd += NodeWithProperties(tle._1, tle._2.props.toMap, tle._2.labels)) + newState.foreach(tle => { + if (!oldState.contains(tle._1)) nodeToDelete += tle._1 + }) + } + + if (!nodeToAdd.isEmpty) this.addNodes(nodeToAdd) + if (!nodeToDelete.isEmpty) this.deleteNodes(nodeToDelete) + } +} \ No newline at end of file diff --git a/external-properties/src/main/scala/cn/pandadb/externalprops/InMemoryPropertyNodeStore.scala b/external-properties/src/main/scala/cn/pandadb/externalprops/InMemoryPropertyNodeStore.scala new file mode 100644 index 00000000..4a8140a9 --- /dev/null +++ b/external-properties/src/main/scala/cn/pandadb/externalprops/InMemoryPropertyNodeStore.scala @@ -0,0 +1,228 @@ +package cn.pandadb.externalprops + +import cn.pandadb.util.{Configuration, PandaModuleContext} +import org.neo4j.cypher.internal.runtime.interpreted._ +import org.neo4j.values.AnyValue +import org.neo4j.values.storable.{NumberValue, StringValue, Value} + +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + +/** + * Created by bluejoe on 2019/10/7. 
+ */ +class InMemoryPropertyNodeStoreFactory extends ExternalPropertyStoreFactory { + override def create(conf: Configuration): CustomPropertyNodeStore = InMemoryPropertyNodeStore; +} + +/** + * used for unit test + */ +object InMemoryPropertyNodeStore extends CustomPropertyNodeStore { + val nodes = mutable.Map[Long, NodeWithProperties](); + + def firstFilterNodes(expr: NFPredicate): Iterable[NodeWithProperties] = { + expr match { + case NFGreaterThan(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.mutable().props.get(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() > + value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) + + /*case NFLessThan(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.field(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() < + value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false))*/ + case NFLessThan(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.mutable().props.get(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() < + value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) + + case NFLessThanOrEqual(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.mutable().props.get(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() <= + value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) + + case NFGreaterThanOrEqual(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.mutable().props.get(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() >= + value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) + + /* case NFEquals(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.mutable().props.get(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() == + value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false))*/ + case NFEquals(fieldName: String, value: AnyValue) => + nodes.values.filter(x => x.mutable().props.get(fieldName).map(_.asObject().toString == + value.asInstanceOf[Value].asObject().toString).getOrElse(false)) + case NFContainsWith(propName, text) => + nodes.values.filter(x => x.mutable().props.get(propName).map(_.asInstanceOf[StringValue].stringValue().contains(text) + ).getOrElse(false)) + case NFStartsWith(propName, text) => + nodes.values.filter(x => x.mutable().props.get(propName).map(_.asInstanceOf[StringValue].stringValue().startsWith(text) + ).getOrElse(false)) + + case NFEndsWith(propName, text) => + nodes.values.filter(x => x.mutable().props.get(propName).map(_.asInstanceOf[StringValue].stringValue().endsWith(text) + ).getOrElse(false)) + } + } + + def filterNodesWithProperties(expr: NFPredicate): Iterable[NodeWithProperties] = { + expr match { + case NFAnd(a, b) => filterNodesWithProperties(a).toSet & filterNodesWithProperties(b).toSet + case NFNot(a) => nodes.values.toSet -- firstFilterNodes(a) + case NFOr(a, b) => filterNodesWithProperties(a).toSet | filterNodesWithProperties(b).toSet + case _ => firstFilterNodes(expr) + } + } + + override def filterNodes(expr: NFPredicate): Iterable[Long] = { + filterNodesWithProperties(expr).map(n => n.id) + } + + def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { + nodes --= docsToBeDeleted + } + + def addNodes(docsToAdded: Iterable[NodeWithProperties]): Unit = { + nodes ++= docsToAdded.map(x => x.id -> x) + } + + def updateNodes(nodeId: Long, addedProps: Map[String, Value], + updateProps: Map[String, Value], removeProps: Array[String], + addedLabels: Array[String], removedLabels: Array[String]): Unit = { + + val n: 
MutableNodeWithProperties = nodes(nodeId).mutable() + if (addedProps != null && addedProps.size > 0) { + n.props ++= addedProps + } + if (updateProps != null && updateProps.size > 0) { + n.props ++= updateProps + } + if (removeProps != null && removeProps.size > 0) { + removeProps.foreach(f => n.props -= f) + } + if (addedLabels != null && addedLabels.size > 0) { + n.labels ++= addedLabels + // nodes(nodeId).labels = nodes(nodeId).labels.toSet + } + if (removedLabels != null && removedLabels.size > 0) { + //val tmpLabels = nodes(nodeId).labels.toSet + n.labels --= removedLabels + } + deleteNodes(Iterable(nodeId)) + addNodes(Iterable(NodeWithProperties(nodeId, n.props.toMap, n.labels))) + + } + + def getNodeWithPropertiesBylabelAndFilter(label: String, expr: NFPredicate): Iterable[NodeWithProperties] = { + //val propName = SolrUtil.labelName + //filterNodes(NFAnd(NFContainsWith(propName, label), expr)) + getNodesWithPropertiesByLabel(label).toSet & filterNodesWithProperties(expr).toSet + } + + override def getNodeBylabelAndFilter(label: String, expr: NFPredicate): Iterable[Long] = { + getNodeWithPropertiesBylabelAndFilter(label, expr).map(n => n.id) + } + + def getNodesWithPropertiesByLabel(label: String): Iterable[NodeWithProperties] = { + val res = mutable.ArrayBuffer[NodeWithProperties]() + nodes.map(n => { + //println(n) + if (n._2.labels.toArray.contains(label)) { + res.append(n._2) + } + }) + res + } + + override def getNodesByLabel(label: String): Iterable[Long] = { + getNodesWithPropertiesByLabel(label).map(n => n.id) + } + + override def getNodeById(id: Long): Option[NodeWithProperties] = { + nodes.get(id) + } + + override def start(ctx: PandaModuleContext): Unit = { + nodes.clear() + } + + override def close(ctx: PandaModuleContext): Unit = { + nodes.clear() + } + + override def beginWriteTransaction(): PropertyWriteTransaction = { + new BufferedExternalPropertyWriteTransaction(this, new InMemoryGroupedOpVisitor(true, nodes), new InMemoryGroupedOpVisitor(false, nodes)) + } +} + +class InMemoryGroupedOpVisitor(isCommit: Boolean, nodes: mutable.Map[Long, NodeWithProperties]) extends GroupedOpVisitor { + + var oldState = mutable.Map[Long, MutableNodeWithProperties](); + var newState = mutable.Map[Long, MutableNodeWithProperties](); + + override def start(ops: GroupedOps): Unit = { + + this.oldState = ops.oldState + this.newState = ops.newState + + } + + override def end(ops: GroupedOps): Unit = { + + //this.oldState.clear() + //this.newState.clear() + + } + + override def visitAddNode(nodeId: Long, props: Map[String, Value], labels: Array[String]): Unit = { + if (isCommit) InMemoryPropertyNodeStore.addNodes(Iterable(NodeWithProperties(nodeId, props, labels))) + else { + InMemoryPropertyNodeStore.deleteNodes(Iterable(nodeId)) + } + + } + + override def visitDeleteNode(nodeId: Long): Unit = { + if (isCommit) InMemoryPropertyNodeStore.deleteNodes(Iterable(nodeId)) + else { + + val oldNode = oldState.get(nodeId).head + InMemoryPropertyNodeStore.addNodes(Iterable(NodeWithProperties(nodeId, oldNode.props.toMap, oldNode.labels))) + + } + } + + override def visitUpdateNode(nodeId: Long, addedProps: Map[String, Value], + updateProps: Map[String, Value], removeProps: Array[String], + addedLabels: Array[String], removedLabels: Array[String]): Unit = { + if (isCommit) InMemoryPropertyNodeStore.updateNodes(nodeId: Long, addedProps: Map[String, Value], + updateProps: Map[String, Value], removeProps: Array[String], + addedLabels: Array[String], removedLabels: Array[String]) + else { + + val oldNode 
= oldState.get(nodeId).head + InMemoryPropertyNodeStore.addNodes(Iterable(NodeWithProperties(nodeId, oldNode.props.toMap, oldNode.labels))) + + } + } + + override def work(): Unit = { + + val nodeToAdd = ArrayBuffer[NodeWithProperties]() + val nodeToDelete = ArrayBuffer[Long]() + if (isCommit) { + + newState.foreach(tle => nodeToAdd += NodeWithProperties(tle._1, tle._2.props.toMap, tle._2.labels)) + oldState.foreach(tle => { + if (!newState.contains(tle._1)) nodeToDelete += tle._1 + }) + } + else { + + oldState.foreach(tle => nodeToAdd += NodeWithProperties(tle._1, tle._2.props.toMap, tle._2.labels)) + newState.foreach(tle => { + if (!oldState.contains(tle._1)) nodeToDelete += tle._1 + }) + } + + InMemoryPropertyNodeStore.addNodes(nodeToAdd) + InMemoryPropertyNodeStore.deleteNodes(nodeToDelete) + + } +} \ No newline at end of file diff --git a/external-properties/src/main/scala/cn/pandadb/externalprops/InSolrPropertyNodeStore.scala b/external-properties/src/main/scala/cn/pandadb/externalprops/InSolrPropertyNodeStore.scala new file mode 100644 index 00000000..b3f1216f --- /dev/null +++ b/external-properties/src/main/scala/cn/pandadb/externalprops/InSolrPropertyNodeStore.scala @@ -0,0 +1,379 @@ +package cn.pandadb.externalprops + +import java.util + +import cn.pandadb.util.ConfigUtils._ +import cn.pandadb.util.{Configuration, PandaModuleContext} +import org.apache.solr.client.solrj.SolrQuery +import org.apache.solr.client.solrj.impl.CloudSolrClient +import org.apache.solr.common.{SolrDocument, SolrInputDocument} +import org.neo4j.cypher.internal.runtime.interpreted.{NFLessThan, NFPredicate, _} +import org.neo4j.values.storable.{ArrayValue, Value, Values} + +import scala.collection.JavaConversions._ +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + + +class InSolrPropertyNodeStoreFactory extends ExternalPropertyStoreFactory { + override def create(conf: Configuration): CustomPropertyNodeStore = { + + import cn.pandadb.util.ConfigUtils._ + + val zkAddr = conf.getRequiredValueAsString("external.properties.store.solr.zk") + val zkCollection = conf.getRequiredValueAsString("external.properties.store.solr.collection") + new InSolrPropertyNodeStore(zkAddr, zkCollection) + } +} + +object SolrUtil { + val idName = "id" + val labelName = "labels" + val tik = "id,labels,_version_" + val arrayName = "Array" + val dateType = "time" + val maxRows = 50000000 + + def solrDoc2nodeWithProperties(doc: SolrDocument): NodeWithProperties = { + val props = mutable.Map[String, Value]() + val id = doc.get(idName) + val labels = ArrayBuffer[String]() + if (doc.get(labelName) != null) doc.get(labelName).asInstanceOf[util.ArrayList[String]].foreach(u => labels += u) + val fieldsName = doc.getFieldNames + fieldsName.foreach(y => { + if (tik.indexOf(y) < 0) { + if (doc.get(y).getClass.getName.contains(arrayName)) { + val tempArray = ArrayBuffer[AnyRef]() + doc.get(y).asInstanceOf[util.ArrayList[AnyRef]].foreach(u => tempArray += u) + if (tempArray.size <= 1) props += y -> Values.of(tempArray.head) + else props += y -> getValueFromArray(tempArray) + } + else props += y -> Values.of(doc.get(y)) + } + }) + NodeWithProperties(id.toString.toLong, props.toMap, labels) + } + + def getValueFromArray(value: ArrayBuffer[AnyRef]): Value = { + val typeObj = value.head + typeObj match { + case s1: java.lang.String => + val strArr = value.map(_.toString).toArray + val result = Values.stringArray(strArr: _*) + result + case s2: java.lang.Boolean => + 
Values.booleanArray(value.map(_.asInstanceOf[Boolean]).toArray) + case s3: java.lang.Long => + Values.longArray(value.map(_.asInstanceOf[Long]).toArray) + case s4: java.lang.Byte => + Values.byteArray(value.map(_.asInstanceOf[Byte]).toArray) + case s5: java.lang.Short => + Values.shortArray(value.map(_.asInstanceOf[Short]).toArray) + case s6: java.lang.Integer => + Values.intArray(value.map(_.asInstanceOf[Int]).toArray) + case s7: java.lang.Double => + Values.doubleArray(value.map(_.asInstanceOf[Double]).toArray) + case s8: java.lang.Float => + Values.floatArray(value.map(_.asInstanceOf[Float]).toArray) + case _ => null + } + } + +} + +/** + * Created by bluejoe on 2019/10/7. + */ +class InSolrPropertyNodeStore(zkUrl: String, collectionName: String) extends CustomPropertyNodeStore { + //initialize solr connection + val _solrClient = { + val client = new CloudSolrClient(zkUrl); + client.setZkClientTimeout(30000); + client.setZkConnectTimeout(50000); + client.setDefaultCollection(collectionName); + client + } + + def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { + _solrClient.deleteById(docsToBeDeleted.map(_.toString).toList); + _solrClient.commit(); + } + + def clearAll(): Unit = { + _solrClient.deleteByQuery("*:*") + _solrClient.commit() + } + + def getRecorderSize: Int = { + val query = "*:*" + _solrClient.query(new SolrQuery().setQuery(query)).getResults().getNumFound.toInt + } + + def addNodes(docsToAdded: Iterable[NodeWithProperties]): Unit = { + + _solrClient.add(docsToAdded.map { x => + val doc = new SolrInputDocument(); + x.props.foreach(y => { + if (Values.isArrayValue(y._2)) { + y._2.asInstanceOf[ArrayValue].foreach(u => doc.addField(y._1, u.asInstanceOf[Value].asObject())) + } + else doc.addField(y._1, y._2.asObject()) + }); + doc.addField(SolrUtil.idName, x.id); + x.labels.foreach(label => doc.addField(SolrUtil.labelName, label)) + doc + }) + + _solrClient.commit(); + + } + + private def predicate2SolrQuery(expr: NFPredicate): String = { + var q: Option[String] = None + expr match { + case expr: NFGreaterThan => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + q = Some(s"$paramKey:{ $paramValue TO * }") + case expr: NFGreaterThanOrEqual => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + q = Some(s"$paramKey:[ $paramValue TO * ]") + case expr: NFLessThan => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + q = Some(s"$paramKey:{ * TO $paramValue}") + case expr: NFLessThanOrEqual => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + q = Some(s"$paramKey:[ * TO $paramValue]") + case expr: NFEquals => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + q = Some(s"$paramKey:$paramValue") + case expr: NFNotEquals => + val paramValue = expr.value.asInstanceOf[Value].asObject() + val paramKey = expr.propName + q = Some(s"-$paramKey:$paramValue") + case expr: NFNotNull => + val paramKey = expr.propName + q = Some(s"$paramKey:*") + case expr: NFIsNull => + val paramKey = expr.propName + q = Some(s"-$paramKey:*") + case expr: NFTrue => + q = Some(s"*:*") + case expr: NFFalse => + q = Some(s"-*:*") + case expr: NFStartsWith => + val paramValue = expr.text + val paramKey = expr.propName + q = Some(s"$paramKey:$paramValue*") + case expr: NFEndsWith => + val paramValue = expr.text + val paramKey = expr.propName + q = Some(s"$paramKey:*$paramValue") + case expr: 
NFHasProperty => + val paramKey = expr.propName + q = Some(s"$paramKey:[* TO *]") + case expr: NFContainsWith => + val paramValue = expr.text + val paramKey = expr.propName + q = Some(s"$paramKey:*$paramValue*") + case expr: NFRegexp => + val paramValue = expr.text.replace(".", "") + val paramKey = expr.propName + q = Some(s"$paramKey:$paramValue") + case expr: NFAnd => + val q1 = predicate2SolrQuery(expr.a) + val q2 = predicate2SolrQuery(expr.b) + q = Some(s"($q1 && $q2)") + case expr: NFOr => + val q1 = predicate2SolrQuery(expr.a) + val q2 = predicate2SolrQuery(expr.b) + q = Some(s"($q1 || $q2)") + case expr: NFNot => + val q1 = predicate2SolrQuery(expr.a) + q = if (q1.indexOf("-") >= 0) Some(s"${q1.substring(q1.indexOf("-") + 1)}") else Some(s"-$q1") + case _ => q = None + } + q.get + } + + def filterNodesWithProperties(expr: NFPredicate): Iterable[NodeWithProperties] = { + + var q: Option[String] = None; + expr match { + case expr: NFAnd => + val q1 = predicate2SolrQuery(expr.a) + val q2 = predicate2SolrQuery(expr.b) + q = Some(s"($q1 && $q2)") + case expr: NFOr => + val q1 = predicate2SolrQuery(expr.a) + val q2 = predicate2SolrQuery(expr.b) + q = Some(s"($q1 || $q2)") + + case expr: NFNot => + val q1 = predicate2SolrQuery(expr.a) + q = if (q1.indexOf("-") >= 0) Some(s"${q1.substring(q1.indexOf("-") + 1)}") else Some(s"-$q1") + + case _ => + val q1 = predicate2SolrQuery(expr) + q = Some(s"$q1") + + } + val query = new SolrQuery() + query.setQuery(q.get) + val res = new SolrQueryResults(_solrClient, query, 10000) + res.iterator2().toIterable + } + + override def filterNodes(expr: NFPredicate): Iterable[Long] = { + filterNodesWithProperties(expr).map(n => n.id) + } + + def getNodesWithPropertiesByLabel(label: String): Iterable[NodeWithProperties] = { + val propName = SolrUtil.labelName + filterNodesWithProperties(NFContainsWith(propName, label)) + } + + override def getNodesByLabel(label: String): Iterable[Long] = { + getNodesWithPropertiesByLabel(label).map(n => n.id) + } + + def getNodeWithPropertiesBylabelAndFilter(label: String, expr: NFPredicate): Iterable[NodeWithProperties] = { + val propName = SolrUtil.labelName + filterNodesWithProperties(NFAnd(NFContainsWith(propName, label), expr)) + } + + override def getNodeBylabelAndFilter(label: String, expr: NFPredicate): Iterable[Long] = { + getNodeWithPropertiesBylabelAndFilter(label, expr).map(n => n.id) + } + + override def getNodeById(id: Long): Option[NodeWithProperties] = { + val propName = SolrUtil.idName + filterNodesWithProperties(NFEquals(propName, Values.of(id))).headOption + } + + override def close(ctx: PandaModuleContext): Unit = { + //_solrClient.close() + } + + override def start(ctx: PandaModuleContext): Unit = { + } + + override def beginWriteTransaction(): PropertyWriteTransaction = { + new BufferedExternalPropertyWriteTransaction(this, new InSolrGroupedOpVisitor(true, _solrClient), new InSolrGroupedOpVisitor(false, _solrClient)) + } +} + +class InSolrGroupedOpVisitor(isCommit: Boolean, _solrClient: CloudSolrClient) extends GroupedOpVisitor { + + var oldState = mutable.Map[Long, MutableNodeWithProperties](); + var newState = mutable.Map[Long, MutableNodeWithProperties](); + + def addNodes(docsToAdded: Iterable[NodeWithProperties]): Unit = { + + _solrClient.add(docsToAdded.map { x => + val doc = new SolrInputDocument(); + x.props.foreach(y => { + if (Values.isArrayValue(y._2)) { + y._2.asInstanceOf[ArrayValue].foreach(u => doc.addField(y._1, u.asInstanceOf[Value].asObject())) + } + else doc.addField(y._1, 
y._2.asObject()) + }); + doc.addField(SolrUtil.idName, x.id); + x.labels.foreach(label => doc.addField(SolrUtil.labelName, label)) + doc + }) + _solrClient.commit(); + + } + + def getNodeWithPropertiesById(nodeId: Long): NodeWithProperties = { + val doc = _solrClient.getById(nodeId.toString) + SolrUtil.solrDoc2nodeWithProperties(doc) + } + + def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { + _solrClient.deleteById(docsToBeDeleted.map(_.toString).toList); + _solrClient.commit(); + } + + override def start(ops: GroupedOps): Unit = { + + this.oldState = ops.oldState + this.newState = ops.newState + + } + + override def end(ops: GroupedOps): Unit = { + + } + + override def visitAddNode(nodeId: Long, props: Map[String, Value], labels: Array[String]): Unit = { + if (isCommit) addNodes(Iterable(NodeWithProperties(nodeId, props, labels))) + else visitDeleteNode(nodeId) + } + + override def visitDeleteNode(nodeId: Long): Unit = { + if (isCommit) deleteNodes(Iterable(nodeId)) + else { + val oldNode = oldState.get(nodeId).head + addNodes(Iterable(NodeWithProperties(nodeId, oldNode.props.toMap, oldNode.labels))) + } + } + + def getSolrNodeById(id: Long): SolrDocument = { + _solrClient.getById(id.toString) + } + + override def visitUpdateNode(nodeId: Long, addedProps: Map[String, Value], + updateProps: Map[String, Value], removeProps: Array[String], + addedLabels: Array[String], removedLabels: Array[String]): Unit = { + + if (isCommit) { + + val doc = getSolrNodeById(nodeId) + + val node = SolrUtil.solrDoc2nodeWithProperties(doc) + val mutiNode = node.mutable() + mutiNode.props ++= addedProps + mutiNode.props ++= updateProps + mutiNode.props --= removeProps + mutiNode.labels ++= addedLabels + mutiNode.labels --= removedLabels + + visitAddNode(nodeId, mutiNode.props.toMap, mutiNode.labels.toArray) + } + + else { + visitDeleteNode(nodeId) + val oldNode = oldState.get(nodeId).head + addNodes(Iterable(NodeWithProperties(nodeId, oldNode.props.toMap, oldNode.labels))) + } + + } + + override def work(): Unit = { + val nodeToAdd = ArrayBuffer[NodeWithProperties]() + val nodeToDelete = ArrayBuffer[Long]() + if (isCommit) { + + newState.foreach(tle => nodeToAdd += NodeWithProperties(tle._1, tle._2.props.toMap, tle._2.labels)) + oldState.foreach(tle => { + if (!newState.contains(tle._1)) nodeToDelete += tle._1 + }) + } + else { + + oldState.foreach(tle => nodeToAdd += NodeWithProperties(tle._1, tle._2.props.toMap, tle._2.labels)) + newState.foreach(tle => { + if (!oldState.contains(tle._1)) nodeToDelete += tle._1 + }) + } + + if (!nodeToAdd.isEmpty) this.addNodes(nodeToAdd) + if (!nodeToDelete.isEmpty) this.deleteNodes(nodeToDelete) + } +} \ No newline at end of file diff --git a/external-properties/src/main/scala/cn/pandadb/externalprops/SolrIterable.scala b/external-properties/src/main/scala/cn/pandadb/externalprops/SolrIterable.scala new file mode 100644 index 00000000..a54645de --- /dev/null +++ b/external-properties/src/main/scala/cn/pandadb/externalprops/SolrIterable.scala @@ -0,0 +1,147 @@ +package cn.pandadb.externalprops + +import org.apache.solr.client.solrj.SolrQuery +import org.apache.solr.client.solrj.impl.CloudSolrClient +import org.apache.solr.common.SolrDocument +import org.apache.log4j.Logger +import org.apache.solr.client.solrj.SolrQuery.ORDER +import org.apache.solr.common.params.CursorMarkParams + +import scala.collection.JavaConversions.asScalaBuffer +import scala.collection.JavaConversions.bufferAsJavaList +import scala.collection.JavaConversions.mapAsJavaMap +import 
scala.collection.JavaConversions.seqAsJavaList +import scala.collection.immutable.Map +import scala.collection.mutable.ArrayBuffer + +class SolrQueryResults(_solrClient: CloudSolrClient, solrQuery: SolrQuery, pageSize: Int = 20) { + + def iterator(): SolrQueryResultsIterator = new SolrQueryResultsIterator(_solrClient, solrQuery, pageSize) + + def getAllResults(): Iterable[NodeWithProperties] = { + val nodeArray = ArrayBuffer[NodeWithProperties]() + solrQuery.setRows(SolrUtil.maxRows) + val res = _solrClient.query(solrQuery).getResults + res.foreach( + x => { + nodeArray += SolrUtil.solrDoc2nodeWithProperties(x) + } + ) + nodeArray + } + + def iterator2(): SolrQueryResultsCursorIterator = new SolrQueryResultsCursorIterator(_solrClient, solrQuery, pageSize) +} + +class SolrQueryResultsCursorIterator(_solrClient: CloudSolrClient, solrQuery: SolrQuery, pageSize: Int = 20) + extends Iterator[NodeWithProperties] { + + var startOfCurrentPage = 0; + var rowIteratorWithinCurrentPage: java.util.Iterator[NodeWithProperties] = null; + var isFinished = false; + val mySolrQuery = solrQuery.getCopy(); + var cursorMark = CursorMarkParams.CURSOR_MARK_START + var nextCursorMark: String = null + private var currentData : Iterable[NodeWithProperties] = _ + mySolrQuery.setRows(pageSize) + mySolrQuery.setSort(SolrUtil.idName, ORDER.asc) + readNextPage(); + + def doc2Node(doc : SolrDocument): NodeWithProperties = { + SolrUtil.solrDoc2nodeWithProperties(doc) + } + + def readNextPage(): Boolean = { + + mySolrQuery.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark) + val rsp = _solrClient.query(mySolrQuery) + nextCursorMark = rsp.getNextCursorMark + val docs = rsp.getResults() + val rows = docs.map {doc2Node} + currentData = null + currentData = rows + rowIteratorWithinCurrentPage = rows.iterator() + if (cursorMark.equals(nextCursorMark)) { + isFinished = true + false + } + else { + cursorMark = nextCursorMark + true + } + + } + + def hasNext(): Boolean = { + rowIteratorWithinCurrentPage.hasNext() || readNextPage() + } + + def next(): NodeWithProperties = { + + rowIteratorWithinCurrentPage.next() + + } + + def getCurrentData(): Iterable[NodeWithProperties] = { + this.currentData + } + +} + +class SolrQueryResultsIterator(_solrClient: CloudSolrClient, solrQuery: SolrQuery, pageSize: Int = 20) + extends Iterator[NodeWithProperties] { + + var startOfCurrentPage = 0; + var rowIteratorWithinCurrentPage: java.util.Iterator[NodeWithProperties] = null; + var totalCountOfRows = -1L; + val mySolrQuery = solrQuery.getCopy(); + var done = true + private var currentData : Iterable[NodeWithProperties] = _ + readNextPage(); + + def doc2Node(doc : SolrDocument): NodeWithProperties = { + SolrUtil.solrDoc2nodeWithProperties(doc) + } + + def readNextPage(): Boolean = { + + if (totalCountOfRows < 0 || startOfCurrentPage < totalCountOfRows) { + mySolrQuery.set("start", startOfCurrentPage); + mySolrQuery.set("rows", pageSize); + startOfCurrentPage += pageSize; + //logger.debug(s"executing solr query: $mySolrQuery"); + val rsp = _solrClient.query(mySolrQuery); + val docs = rsp.getResults(); + totalCountOfRows = docs.getNumFound(); + //logger.debug(s"numFound: $totalCountOfRows"); + val rows = docs.map {doc2Node}; + currentData = null + currentData = rows + rowIteratorWithinCurrentPage = rows.iterator(); + true; + } + else { + false; + } + } + + def hasNext(): Boolean = { + rowIteratorWithinCurrentPage.hasNext() || startOfCurrentPage < totalCountOfRows + } + + def next(): NodeWithProperties = { + + if 
(!rowIteratorWithinCurrentPage.hasNext()) { + + if (!readNextPage()) throw new NoSuchElementException(); + + } + rowIteratorWithinCurrentPage.next() + + } + + def getCurrentData(): Iterable[NodeWithProperties] = { + this.currentData + } + +} \ No newline at end of file diff --git a/external-properties/src/main/scala/cn/pandadb/externalprops/module.scala b/external-properties/src/main/scala/cn/pandadb/externalprops/module.scala new file mode 100644 index 00000000..ee3f48c1 --- /dev/null +++ b/external-properties/src/main/scala/cn/pandadb/externalprops/module.scala @@ -0,0 +1,34 @@ +package cn.pandadb.externalprops + +import cn.pandadb.util._ + +class ExternalPropertiesModule extends PandaModule { + override def init(ctx: PandaModuleContext): Unit = { + val conf = ctx.configuration; + import cn.pandadb.util.ConfigUtils._ + + val isExternalPropertyStorageEnabled = conf.getValueAsBoolean("external.property.storage.enabled", false) + if (isExternalPropertyStorageEnabled) { + val factoryClassName = conf.getRequiredValueAsString("external.properties.store.factory") + + val store = Class.forName(factoryClassName).newInstance().asInstanceOf[ExternalPropertyStoreFactory].create(conf) + ExternalPropertiesContext.bindCustomPropertyNodeStore(store); + } + } + + override def close(ctx: PandaModuleContext): Unit = { + ExternalPropertiesContext.maybeCustomPropertyNodeStore.foreach(_.close(ctx)) + } + + override def start(ctx: PandaModuleContext): Unit = { + ExternalPropertiesContext.maybeCustomPropertyNodeStore.foreach(_.start(ctx)) + } +} + +object ExternalPropertiesContext extends ContextMap { + def maybeCustomPropertyNodeStore: Option[CustomPropertyNodeStore] = getOption[CustomPropertyNodeStore] + + def bindCustomPropertyNodeStore(store: CustomPropertyNodeStore): Unit = put[CustomPropertyNodeStore](store); + + def isExternalPropStorageEnabled: Boolean = maybeCustomPropertyNodeStore.isDefined +} \ No newline at end of file diff --git a/external-properties/src/main/scala/org/neo4j/cypher/internal/compiler/v3_5/planner/logical/QueryPlannerConfiguration.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/compiler/v3_5/planner/logical/QueryPlannerConfiguration.scala new file mode 100644 index 00000000..eb311288 --- /dev/null +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/compiler/v3_5/planner/logical/QueryPlannerConfiguration.scala @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +package org.neo4j.cypher.internal.compiler.v3_5.planner.logical + +import cn.pandadb.externalprops.{ExternalPropertiesContext, CustomPropertyNodeStore} +import org.neo4j.cypher.internal.compiler.v3_5.planner.logical.steps._ +import org.neo4j.cypher.internal.compiler.v3_5.{UpdateStrategy, defaultUpdateStrategy} +import org.neo4j.cypher.internal.ir.v3_5.{InterestingOrder, QueryGraph} +import org.neo4j.cypher.internal.v3_5.logical.plans.LogicalPlan + +object QueryPlannerConfiguration { + + val bypassIndex = ExternalPropertiesContext.isExternalPropStorageEnabled + + private val noIndexleafPlanFromExpressions: IndexedSeq[LeafPlanner with LeafPlanFromExpressions] = IndexedSeq( + // MATCH (n) WHERE id(n) IN ... RETURN n + idSeekLeafPlanner, + + // MATCH (n:Person) RETURN n + labelScanLeafPlanner + ) + + private val leafPlanFromExpressions: IndexedSeq[LeafPlanner with LeafPlanFromExpressions] = IndexedSeq( + // MATCH (n) WHERE id(n) IN ... RETURN n + idSeekLeafPlanner, + + // MATCH (n) WHERE n.prop IN ... RETURN n + indexSeekLeafPlanner, + + // MATCH (n) WHERE has(n.prop) RETURN n + // MATCH (n:Person) WHERE n.prop CONTAINS ... + indexScanLeafPlanner, + + // MATCH (n:Person) RETURN n + labelScanLeafPlanner + ) + + private val leafPlanFromExpressionsSwitch = if (bypassIndex) noIndexleafPlanFromExpressions else leafPlanFromExpressions + + val allLeafPlanners = leafPlanFromExpressionsSwitch ++ IndexedSeq( + argumentLeafPlanner, + + // MATCH (n) RETURN n + allNodesLeafPlanner, + + // Handles OR between other leaf planners + OrLeafPlanner(leafPlanFromExpressions)) + + val default: QueryPlannerConfiguration = { + val predicateSelector = Selector(pickBestPlanUsingHintsAndCost, + selectPatternPredicates, + triadicSelectionFinder, + selectCovered, + selectHasLabelWithJoin + ) + + QueryPlannerConfiguration( + pickBestCandidate = pickBestPlanUsingHintsAndCost, + applySelections = predicateSelector, + optionalSolvers = Seq( + applyOptional, + leftOuterHashJoin, + rightOuterHashJoin + ), + leafPlanners = LeafPlannerList(allLeafPlanners), + updateStrategy = defaultUpdateStrategy + ) + + } +} + +case class QueryPlannerConfiguration(leafPlanners: LeafPlannerIterable, + applySelections: PlanSelector, + optionalSolvers: Seq[OptionalSolver], + pickBestCandidate: CandidateSelectorFactory, + updateStrategy: UpdateStrategy) { + + def toKit(interestingOrder: InterestingOrder, context: LogicalPlanningContext) = + QueryPlannerKit( + select = (plan: LogicalPlan, qg: QueryGraph) => applySelections(plan, qg, interestingOrder, context), + pickBest = pickBestCandidate(context) + ) + + def withLeafPlanners(leafPlanners: LeafPlannerIterable): QueryPlannerConfiguration = copy(leafPlanners = leafPlanners) + + def withUpdateStrategy(updateStrategy: UpdateStrategy): QueryPlannerConfiguration = copy(updateStrategy = updateStrategy) +} + +case class QueryPlannerKit(select: (LogicalPlan, QueryGraph) => LogicalPlan, pickBest: CandidateSelector) { + def select(plans: Iterable[Seq[LogicalPlan]], qg: QueryGraph): Iterable[Seq[LogicalPlan]] = + plans.map(_.map(plan => select(plan, qg))) +} \ No newline at end of file diff --git a/external-properties/src/main/scala/org/neo4j/cypher/internal/compiler/v3_5/planner/logical/steps/verifyBestPlan.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/compiler/v3_5/planner/logical/steps/verifyBestPlan.scala new file mode 100644 index 00000000..a1ad4601 --- /dev/null +++ 
b/external-properties/src/main/scala/org/neo4j/cypher/internal/compiler/v3_5/planner/logical/steps/verifyBestPlan.scala @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +package org.neo4j.cypher.internal.compiler.v3_5.planner.logical.steps + +import cn.pandadb.externalprops.{ExternalPropertiesContext, CustomPropertyNodeStore} +import org.neo4j.cypher.internal.compiler.v3_5.planner.logical.{LogicalPlanningContext, PlanTransformer} +import org.neo4j.cypher.internal.compiler.v3_5.{IndexHintUnfulfillableNotification, JoinHintUnfulfillableNotification} +import org.neo4j.cypher.internal.ir.v3_5.PlannerQuery +import org.neo4j.cypher.internal.planner.v3_5.spi.PlanContext +import org.neo4j.cypher.internal.v3_5.logical.plans.LogicalPlan +import org.neo4j.cypher.internal.v3_5.ast._ +import org.neo4j.cypher.internal.v3_5.expressions.LabelName +import org.neo4j.cypher.internal.v3_5.util._ + +object verifyBestPlan extends PlanTransformer { + + val bypassIndex = ExternalPropertiesContext.isExternalPropStorageEnabled + + def apply(plan: LogicalPlan, expected: PlannerQuery, context: LogicalPlanningContext): LogicalPlan = { + val constructed = context.planningAttributes.solveds.get(plan.id) + if (expected != constructed) { + val unfulfillableIndexHints = findUnfulfillableIndexHints(expected, context.planContext) + val unfulfillableJoinHints = findUnfulfillableJoinHints(expected, context.planContext) + val expectedWithoutHints = expected.withoutHints(unfulfillableIndexHints ++ unfulfillableJoinHints) + if (expectedWithoutHints != constructed) { + val a: PlannerQuery = expected.withoutHints(expected.allHints) + val b: PlannerQuery = constructed.withoutHints(constructed.allHints) + if (a != b) { + // unknown planner issue failed to find plan (without regard for differences in hints) + throw new InternalException(s"Expected \n$expected \n\n\nInstead, got: \n$constructed\nPlan: $plan") + } else { + // unknown planner issue failed to find plan matching hints (i.e. "implicit hints") + val expectedHints = expected.allHints + val actualHints = constructed.allHints + val missing = expectedHints.diff(actualHints) + val solvedInAddition = actualHints.diff(expectedHints) + val inventedHintsAndThenSolvedThem = solvedInAddition.exists(!expectedHints.contains(_)) + if ( !bypassIndex && (missing.nonEmpty || inventedHintsAndThenSolvedThem)) { + def out(h: Seq[Hint]) = h.mkString("`", ", ", "`") + val details = if (missing.isEmpty) + s"""Expected: + |${out(expectedHints)} + | + |Instead, got: + |${out(actualHints)}""".stripMargin + else + s"Could not solve these hints: ${out(missing)}" + + val message = + s"""Failed to fulfil the hints of the query. 
+ |$details + | + |Plan $plan""".stripMargin + + throw new HintException(message) + } + } + } else { + processUnfulfilledIndexHints(context, unfulfillableIndexHints) + processUnfulfilledJoinHints(plan, context, unfulfillableJoinHints) + } + } + plan + } + + private def processUnfulfilledIndexHints(context: LogicalPlanningContext, hints: Seq[UsingIndexHint]): Unit = { + if (hints.nonEmpty) { + // hints referred to non-existent indexes ("explicit hints") + if (context.useErrorsOverWarnings) { + val firstIndexHint = hints.head + throw new IndexHintException(firstIndexHint.variable.name, firstIndexHint.label.name, firstIndexHint.properties.map(_.name), "No such index") + } else { + hints.foreach { hint => + context.notificationLogger.log(IndexHintUnfulfillableNotification(hint.label.name, hint.properties.map(_.name))) + } + } + } + } + + private def processUnfulfilledJoinHints(plan: LogicalPlan, context: LogicalPlanningContext, hints: Seq[UsingJoinHint]): Unit = { + if (hints.nonEmpty) { + // we were unable to plan hash join on some requested nodes + if (context.useErrorsOverWarnings) { + val firstJoinHint = hints.head + throw new JoinHintException(firstJoinHint.variables.map(_.name).reduceLeft(_ + ", " + _), s"Unable to plan hash join. Instead, constructed\n$plan") + } else { + hints.foreach { hint => + context.notificationLogger.log(JoinHintUnfulfillableNotification(hint.variables.map(_.name).toIndexedSeq)) + } + } + } + } + + private def findUnfulfillableIndexHints(query: PlannerQuery, planContext: PlanContext): Seq[UsingIndexHint] = { + query.allHints.flatMap { + // using index name:label(property1,property2) + case UsingIndexHint(_, LabelName(label), properties, _) + if planContext.indexExistsForLabelAndProperties(label, properties.map(_.name)) => None + // no such index exists + case hint: UsingIndexHint => Some(hint) + // don't care about other hints + case _ => None + } + } + + private def findUnfulfillableJoinHints(query: PlannerQuery, planContext: PlanContext): Seq[UsingJoinHint] = { + query.allHints.collect { + case hint: UsingJoinHint => hint + } + } +} \ No newline at end of file diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/InterpretedPipeBuilder.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/InterpretedPipeBuilder.scala similarity index 96% rename from src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/InterpretedPipeBuilder.scala rename to external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/InterpretedPipeBuilder.scala index 5fa45b62..8eb81413 100644 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/InterpretedPipeBuilder.scala +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/InterpretedPipeBuilder.scala @@ -19,7 +19,7 @@ */ package org.neo4j.cypher.internal.runtime.interpreted -import cn.graiph.util.Logging +import cn.pandadb.externalprops.{ExternalPropertiesContext, CustomPropertyNodeStore} import org.neo4j.cypher.internal.ir.v3_5.VarPatternLength import org.neo4j.cypher.internal.planner.v3_5.spi.TokenContext import org.neo4j.cypher.internal.runtime.ProcedureCallMode @@ -28,16 +28,16 @@ import org.neo4j.cypher.internal.runtime.interpreted.commands.convert.PatternCon import org.neo4j.cypher.internal.runtime.interpreted.commands.convert.{ExpressionConverters, InterpretedCommandProjection} import 
org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.{AggregationExpression, Literal, ShortestPathExpression} import org.neo4j.cypher.internal.runtime.interpreted.commands.predicates.{Predicate, True} -import org.neo4j.cypher.internal.runtime.interpreted.pipes._ +import org.neo4j.cypher.internal.runtime.interpreted.pipes.{FilterPipe, _} import org.neo4j.cypher.internal.v3_5.ast.semantics.SemanticTable import org.neo4j.cypher.internal.v3_5.expressions.{Equals => ASTEquals, Expression => ASTExpression, _} import org.neo4j.cypher.internal.v3_5.logical.plans import org.neo4j.cypher.internal.v3_5.logical.plans.{ColumnOrder, Limit => LimitPlan, LoadCSV => LoadCSVPlan, Skip => SkipPlan, _} import org.neo4j.cypher.internal.v3_5.util.attribution.Id import org.neo4j.cypher.internal.v3_5.util.{Eagerly, InternalException} -import org.neo4j.kernel.impl.Settings import org.neo4j.values.AnyValue import org.neo4j.values.virtual.{NodeValue, RelationshipValue} +import cn.pandadb.util._ /** * Responsible for turning a logical plan with argument pipes into a new pipe. @@ -50,6 +50,8 @@ case class InterpretedPipeBuilder(recurse: LogicalPlan => Pipe, tokenContext: TokenContext) (implicit semanticTable: SemanticTable) extends PipeBuilder with Logging { + val nodeStore: Option[CustomPropertyNodeStore] = ExternalPropertiesContext.maybeCustomPropertyNodeStore + private def getBuildExpression(id: Id) = rewriteAstExpression andThen ((e: ASTExpression) => expressionConverters.toCommandExpression(id, e)) andThen (expression => expression.rewrite(KeyTokenResolver.resolveExpressions(_, tokenContext))) @@ -124,29 +126,21 @@ case class InterpretedPipeBuilder(recurse: LogicalPlan => Pipe, case Selection(predicate, _) => val predicateExpression = if (predicate.exprs.size == 1) buildExpression(predicate.exprs.head) else buildExpression(predicate) - - //NOTE: push down predicate - if (Settings._hookEnabled) { + val pipe = FilterPipe(source, predicateExpression)(id = id) + //NOTE: predicate push down + if (nodeStore.isDefined) { source match { case x: AllNodesScanPipe => - x.predicatePushDown(predicateExpression); - case _ => logger.debug("push down predicate: Pipe no match") + x.pushDownPredicate(nodeStore.get, pipe, predicateExpression) + case x: NodeByLabelScanPipe => + x.pushDownPredicate(nodeStore.get, pipe, predicateExpression, x.label.name) + case _ => } } - - FilterPipe(source, predicateExpression)(id = id) + pipe case Expand(_, fromName, dir, types: Seq[RelTypeName], toName, relName, ExpandAll) => - (Settings._patternMatchFirst, source) match { - //NOTE: yes! we use pattern match first!! 
- case (true, FilterPipe(source2, predicate2)) => { - logger.debug(s"perform pattern match first!"); - FilterPipe(ExpandAllPipe(source2, fromName, relName, toName, dir, LazyTypes(types.toArray))(id = id), predicate2)(id = source.id) - } - - //default behavier: use where predicate first - case _ => ExpandAllPipe(source, fromName, relName, toName, dir, LazyTypes(types.toArray))(id = id) - } + ExpandAllPipe(source, fromName, relName, toName, dir, LazyTypes(types.toArray))(id = id) case Expand(_, fromName, dir, types: Seq[RelTypeName], toName, relName, ExpandInto) => ExpandIntoPipe(source, fromName, relName, toName, dir, LazyTypes(types.toArray))(id = id) diff --git a/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/AllNodesScanPipe.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/AllNodesScanPipe.scala new file mode 100644 index 00000000..6f13894f --- /dev/null +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/AllNodesScanPipe.scala @@ -0,0 +1,23 @@ +package org.neo4j.cypher.internal.runtime.interpreted.pipes + +import org.neo4j.cypher.internal.runtime.interpreted._ +import org.neo4j.cypher.internal.v3_5.util.attribution.Id +import org.neo4j.values.virtual.{NodeValue, VirtualNodeValue} + +case class AllNodesScanPipe(ident: String)(val id: Id = Id.INVALID_ID) extends PredicatePushDownPipe { + + protected def internalCreateResults(state: QueryState): Iterator[ExecutionContext] = { + val baseContext = state.newExecutionContext(executionContextFactory) + var nodes: Option[Iterable[VirtualNodeValue]] = None + if (nodeStore.isDefined && predicate.isDefined && fatherPipe != null) { + nodes = fetchNodes(state, baseContext) + } + val nodesIterator = nodes match { + case Some(x) => + x.iterator + case None => + state.query.nodeOps.all + } + nodesIterator.map(n => executionContextFactory.copyWith(baseContext, ident, n)) + } +} \ No newline at end of file diff --git a/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/CreatePipe.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/CreatePipe.scala new file mode 100644 index 00000000..fecdfa33 --- /dev/null +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/CreatePipe.scala @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +package org.neo4j.cypher.internal.runtime.interpreted.pipes + +import org.neo4j.cypher.internal.runtime.interpreted._ +import org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.Expression +import org.neo4j.cypher.internal.runtime.{LenientCreateRelationship, Operations, QueryContext} +import org.neo4j.function.ThrowingBiConsumer +import org.neo4j.values.AnyValue +import org.neo4j.values.storable.{Value, Values} +import org.neo4j.values.virtual.{NodeValue, RelationshipValue, VirtualNodeValue} +import org.neo4j.cypher.internal.v3_5.util.attribution.Id +import org.neo4j.cypher.internal.v3_5.util.{CypherTypeException, InternalException, InvalidSemanticsException} + +/** + * Extends PipeWithSource with methods for setting properties and labels on entities. + */ +abstract class BaseCreatePipe(src: Pipe) extends PipeWithSource(src) { + + /** + * Set properties on node by delegating to `setProperty`. + */ + protected def setProperties(context: ExecutionContext, + state: QueryState, + entityId: Long, + properties: Expression, + ops: Operations[_]): Unit = + properties(context, state) match { + case _: NodeValue | _: RelationshipValue => + throw new CypherTypeException("Parameter provided for node creation is not a Map") + case IsMap(map) => + map(state.query).foreach(new ThrowingBiConsumer[String, AnyValue, RuntimeException] { + override def accept(k: String, v: AnyValue): Unit = setProperty(entityId, k, v, state.query, ops) + }) + + case _ => + throw new CypherTypeException("Parameter provided for node creation is not a Map") + } + + /** + * Set property on node, or call `handleNoValue` if value is `NO_VALUE`. + */ + protected def setProperty(entityId: Long, + key: String, + value: AnyValue, + qtx: QueryContext, + ops: Operations[_]): Unit = { + //do not set properties for null values + if (value == Values.NO_VALUE) { + handleNoValue(key) + } else { + val propertyKeyId = qtx.getOrCreatePropertyKeyId(key) + ops.setProperty(entityId, propertyKeyId, makeValueNeoSafe(value)) + } + } + + /** + * Callback for when setProperty encounters a NO_VALUE + * + * @param key the property key associated with the NO_VALUE + */ + protected def handleNoValue(key: String): Unit +} + +/** + * Extend BaseCreatePipe with methods to create nodes and relationships from commands. + */ +abstract class EntityCreatePipe(src: Pipe) extends BaseCreatePipe(src) { + + /** + * Create node from command. + */ + protected def createNode(context: ExecutionContext, + state: QueryState, + data: CreateNodeCommand): (String, NodeValue) = { + val labelIds = data.labels.map(_.getOrCreateId(state.query).id).toArray + val node = state.query.createNode(labelIds) + data.properties.foreach(setProperties(context, state, node.id(), _, state.query.nodeOps)) + data.idName -> node + } + + /** + * Create relationship from command. 
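+ * If lenient CREATE is enabled and an endpoint node is missing, NO_VALUE is bound for the relationship instead of throwing.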
+ */ + protected def createRelationship(context: ExecutionContext, + state: QueryState, + data: CreateRelationshipCommand): (String, AnyValue) = { + val start = getNode(context, data.idName, data.startNode, state.lenientCreateRelationship) + val end = getNode(context, data.idName, data.endNode, state.lenientCreateRelationship) + + val relationship = + if (start == null || end == null) + Values.NO_VALUE // lenient create relationship NOOPs on missing node + else { + val typeId = data.relType.typ(state.query) + val relationship = state.query.createRelationship(start.id(), end.id(), typeId) + data.properties.foreach(setProperties(context, state, relationship.id(), _, state.query.relationshipOps)) + relationship + } + data.idName -> relationship + } + + private def getNode(row: ExecutionContext, relName: String, name: String, lenient: Boolean): VirtualNodeValue = + row.get(name) match { + case Some(n: VirtualNodeValue) => n + case Some(Values.NO_VALUE) => + if (lenient) null + else throw new InternalException(LenientCreateRelationship.errorMsg(relName, name)) + case Some(x) => throw new InternalException(s"Expected to find a node at '$name' but found instead: $x") + case None => throw new InternalException(s"Expected to find a node at '$name' but found instead: null") + } +} + +/** + * Creates nodes and relationships from the constructor commands. + */ +case class CreatePipe(src: Pipe, nodes: Array[CreateNodeCommand], relationships: Array[CreateRelationshipCommand]) + (val id: Id = Id.INVALID_ID) extends EntityCreatePipe(src) { + + override def internalCreateResults(input: Iterator[ExecutionContext], state: QueryState): Iterator[ExecutionContext] = + input.map(row => { + nodes.foreach { nodeCommand => + val (key, node) = createNode(row, state, nodeCommand) + row.set(key, node) + } + + relationships.foreach{ relCommand => + val (key, node) = createRelationship(row, state, relCommand) + row.set(key, node) + } + + row + }) + + override protected def handleNoValue(key: String) { + // do nothing + } +} + +case class CreateNodeCommand(idName: String, + labels: Seq[LazyLabel], + properties: Option[Expression]) + +case class CreateRelationshipCommand(idName: String, + startNode: String, + relType: LazyType, + endNode: String, + properties: Option[Expression]) + +/** + * Create a node corresponding to the constructor command. + * + * Differs from CreatePipe in that it throws on NO_VALUE properties. Merge cannot use null properties, + * * since in that case the match part will not find the result of the create. + */ +case class MergeCreateNodePipe(src: Pipe, data: CreateNodeCommand) + (val id: Id = Id.INVALID_ID) extends EntityCreatePipe(src) { + + override def internalCreateResults(input: Iterator[ExecutionContext], state: QueryState): Iterator[ExecutionContext] = + input.map(inRow => { + val (idName, node) = createNode(inRow, state, data) + inRow.copyWith(idName, node) + }) + + override protected def handleNoValue(key: String): Unit = { + throw new InvalidSemanticsException(s"Cannot merge node using null property value for $key") + } +} + +/** + * Create a relationship corresponding to the constructor command. + * + * Differs from CreatePipe in that it throws on NO_VALUE properties. Merge cannot use null properties, + * since in that case the match part will not find the result of the create. 
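+ * Properties whose value is NO_VALUE are skipped silently (handleNoValue is a no-op here), unlike the MERGE pipes below.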
+ */ +case class MergeCreateRelationshipPipe(src: Pipe, data: CreateRelationshipCommand) + (val id: Id = Id.INVALID_ID) + extends EntityCreatePipe(src) { + + override def internalCreateResults(input: Iterator[ExecutionContext], state: QueryState): Iterator[ExecutionContext] = + input.map(inRow => { + val (idName, relationship) = createRelationship(inRow, state, data) + inRow.copyWith(idName, relationship) + }) + + override protected def handleNoValue(key: String): Unit = { + throw new InvalidSemanticsException(s"Cannot merge relationship using null property value for $key") + } +} \ No newline at end of file diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/ExpandAllPipe.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/ExpandAllPipe.scala similarity index 64% rename from src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/ExpandAllPipe.scala rename to external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/ExpandAllPipe.scala index 42fc4fbc..7b234e50 100644 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/ExpandAllPipe.scala +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/ExpandAllPipe.scala @@ -1,3 +1,22 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ package org.neo4j.cypher.internal.runtime.interpreted.pipes import org.neo4j.cypher.internal.runtime.interpreted.ExecutionContext @@ -6,7 +25,7 @@ import org.neo4j.cypher.internal.v3_5.util.attribution.Id import org.neo4j.cypher.internal.v3_5.expressions.SemanticDirection import org.neo4j.values.AnyValue import org.neo4j.values.storable.Values -import org.neo4j.values.virtual.{RelationshipValue, NodeValue} +import org.neo4j.values.virtual.{NodeValue, RelationshipValue, VirtualNodeValue} case class ExpandAllPipe(source: Pipe, fromName: String, @@ -20,7 +39,7 @@ case class ExpandAllPipe(source: Pipe, input.flatMap { row => getFromNode(row) match { - case n: NodeValue => + case n: VirtualNodeValue => val relationships: Iterator[RelationshipValue] = state.query.getRelationshipsForIds(n.id(), dir, types.types(state.query)) relationships.map { r => val other = r.otherNode(n) @@ -38,4 +57,4 @@ case class ExpandAllPipe(source: Pipe, def getFromNode(row: ExecutionContext): AnyValue = row.getOrElse(fromName, throw new InternalException(s"Expected to find a node at '$fromName' but found nothing")) -} +} \ No newline at end of file diff --git a/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/FilterPipe.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/FilterPipe.scala new file mode 100644 index 00000000..bc29797d --- /dev/null +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/FilterPipe.scala @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Neo4j is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +package org.neo4j.cypher.internal.runtime.interpreted.pipes + +import org.neo4j.cypher.internal.runtime.interpreted.ExecutionContext +import org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.Expression +import org.neo4j.values.storable.Values +import org.neo4j.cypher.internal.v3_5.util.attribution.Id + +case class FilterPipe(source: Pipe, predicate: Expression) + (val id: Id = Id.INVALID_ID) extends PipeWithSource(source) { + + var _optBypass: Boolean = false + + def bypass(isBypass: Boolean = true): Unit = { + _optBypass = isBypass + } + + predicate.registerOwningPipe(this) + + protected def internalCreateResults(input: Iterator[ExecutionContext], state: QueryState): Iterator[ExecutionContext] = { + if (_optBypass) input else input.filter (ctx => predicate (ctx, state) eq Values.TRUE) + } +} \ No newline at end of file diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/NodeByLabelScanPipe.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/NodeByLabelScanPipe.scala similarity index 57% rename from src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/NodeByLabelScanPipe.scala rename to external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/NodeByLabelScanPipe.scala index 397ce927..35d84b64 100644 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/NodeByLabelScanPipe.scala +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/NodeByLabelScanPipe.scala @@ -21,39 +21,27 @@ package org.neo4j.cypher.internal.runtime.interpreted.pipes import org.neo4j.cypher.internal.runtime.interpreted.ExecutionContext import org.neo4j.cypher.internal.v3_5.util.attribution.Id - -import scala.collection.mutable -import org.neo4j.kernel.impl.CustomPropertyNodeStoreHolder -import org.neo4j.values.virtual.NodeValue +import org.neo4j.values.virtual.{NodeValue, VirtualNodeValue} case class NodeByLabelScanPipe(ident: String, label: LazyLabel) - (val id: Id = Id.INVALID_ID) extends Pipe { + (val id: Id = Id.INVALID_ID) extends PredicatePushDownPipe { protected def internalCreateResults(state: QueryState): Iterator[ExecutionContext] = { - /* - label.getOptId(state.query) match { - case Some(labelId) => - val nodes = state.query.getNodesByLabel(labelId.id) - nodes.foreach(n=>println(n.id())) - val baseContext = state.newExecutionContext(executionContextFactory) - nodes.map(n => executionContextFactory.copyWith(baseContext, ident, n)) - case None => - Iterator.empty - } - */ - // todo: optimize label.getOptId(state.query) match { case Some(labelId) => - //val nodes = state.query.getNodesByLabel(labelId.id) - val customPropertyNodes = CustomPropertyNodeStoreHolder.get.getNodesByLabel(label.name) - val nodesArray = mutable.ArrayBuffer[NodeValue]() - customPropertyNodes.foreach(v=>{ - nodesArray.append(v.toNeo4jNodeValue()) - }) - val nodes = nodesArray.toIterator val baseContext = state.newExecutionContext(executionContextFactory) - nodes.map(n => executionContextFactory.copyWith(baseContext, ident, n)) + var nodes: Option[Iterable[VirtualNodeValue]] = None + if (nodeStore.isDefined && fatherPipe != null) { + nodes = fetchNodes(state, baseContext) + } + val nodesIterator = nodes match { + case Some(x) => + x.iterator + case None => + state.query.getNodesByLabel(labelId.id) + } + nodesIterator.map(n => executionContextFactory.copyWith(baseContext, ident, n)) case None => 
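+ // the label has no token id yet, so no stored node can carry it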
Iterator.empty } diff --git a/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/PredicatePushDownPipe.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/PredicatePushDownPipe.scala new file mode 100644 index 00000000..582acacb --- /dev/null +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/PredicatePushDownPipe.scala @@ -0,0 +1,103 @@ +package org.neo4j.cypher.internal.runtime.interpreted.pipes + +import cn.pandadb.externalprops.CustomPropertyNodeStore +import org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.{Expression, ParameterExpression, Property, SubstringFunction, ToIntegerFunction} +import org.neo4j.cypher.internal.runtime.interpreted.commands.predicates._ +import org.neo4j.cypher.internal.runtime.interpreted.commands.values.KeyToken +import org.neo4j.cypher.internal.runtime.interpreted.{NFPredicate, _} +import org.neo4j.cypher.internal.v3_5.util.{Fby, Last, NonEmptyList} +import org.neo4j.values.storable.{StringValue, Values} +import org.neo4j.values.virtual.{NodeValue, VirtualNodeValue, VirtualValues} + +trait PredicatePushDownPipe extends Pipe{ + + var nodeStore: Option[CustomPropertyNodeStore] = None + + var fatherPipe: Option[FilterPipe] = None + + var predicate: Option[Expression] = None + + var labelName: String = null + + def pushDownPredicate(nodeStore: CustomPropertyNodeStore, fatherPipe: FilterPipe, predicate: Expression, label: String = null): Unit = { + this.nodeStore = Some(nodeStore) + this.fatherPipe = Some(fatherPipe) + this.predicate = Some(predicate) + this.labelName = label + } + + private def convertPredicate(expression: Expression, state: QueryState, baseContext: ExecutionContext): NFPredicate = { + val expr: NFPredicate = expression match { + case GreaterThan(a: Property, b: ParameterExpression) => + NFGreaterThan(a.propertyKey.name, b.apply(baseContext, state)) + case GreaterThanOrEqual(a: Property, b: ParameterExpression) => + NFGreaterThanOrEqual(a.propertyKey.name, b.apply(baseContext, state)) + case LessThan(a: Property, b: ParameterExpression) => + NFLessThan(a.propertyKey.name, b.apply(baseContext, state)) + case LessThanOrEqual(a: Property, b: ParameterExpression) => + NFLessThanOrEqual(a.propertyKey.name, b.apply(baseContext, state)) + case Equals(a: Property, b: ParameterExpression) => + NFEquals(a.propertyKey.name, b.apply(baseContext, state)) + case Contains(a: Property, b: ParameterExpression) => + NFContainsWith(a.propertyKey.name, b.apply(baseContext, state).asInstanceOf[StringValue].stringValue()) + case StartsWith(a: Property, b: ParameterExpression) => + NFStartsWith(a.propertyKey.name, b.apply(baseContext, state).asInstanceOf[StringValue].stringValue()) + case EndsWith(a: Property, b: ParameterExpression) => + NFEndsWith(a.propertyKey.name, b.apply(baseContext, state).asInstanceOf[StringValue].stringValue()) + case RegularExpression(a: Property, b: ParameterExpression) => + NFRegexp(a.propertyKey.name, b.apply(baseContext, state).asInstanceOf[StringValue].stringValue()) + case PropertyExists(variable: Expression, propertyKey: KeyToken) => + NFHasProperty(propertyKey.name) + case x: Ands => + convertComboPredicatesLoop(NFAnd, x.predicates, state, baseContext) + case x: Ors => + convertComboPredicatesLoop(NFOr, x.predicates, state, baseContext) + case Not(p) => + val innerP: NFPredicate = convertPredicate(p, state, baseContext) + if (innerP == null) null else NFNot(innerP) + case _ => + null + } + expr + 
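+ // a null result marks the expression as not translatable to an NFPredicate, leaving it to the normal FilterPipe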
} + + private def convertComboPredicatesLoop(f: (NFPredicate, NFPredicate) => NFPredicate, + expression: NonEmptyList[Predicate], + state: QueryState, + baseContext: ExecutionContext): NFPredicate = { + val lhs = convertPredicate(expression.head, state, baseContext) + val rhs = if (expression.tailOption.isDefined) convertComboPredicatesLoop(f, expression.tailOption.get, state, baseContext) else null + if (rhs == null) { + lhs + } + else { + f(lhs, rhs) + } + } + + def fetchNodes(state: QueryState, baseContext: ExecutionContext): Option[Iterable[VirtualNodeValue]] = { + predicate match { + case Some(p) => + val expr: NFPredicate = convertPredicate(p, state, baseContext) + if (expr != null && (expr.isInstanceOf[NFAnd] || expr.isInstanceOf[NFOr] || expr.isInstanceOf[NFContainsWith])) {// only enable ppd when NFAnd, NFor + fatherPipe.get.bypass() + if (labelName != null) { + Some(nodeStore.get.getNodeBylabelAndFilter(labelName, expr).map(id => VirtualValues.node(id))) + } + else { + Some(nodeStore.get.filterNodes(expr).map(id => VirtualValues.node(id))) + } + } + else { + if (labelName != null) { + Some(nodeStore.get.getNodesByLabel(labelName).map(id => VirtualValues.node(id))) + } + else { + None + } + } + case None => + None + } + } +} diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/predicates.scala b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/predicates.scala similarity index 95% rename from src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/predicates.scala rename to external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/predicates.scala index be158c45..bf492072 100644 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/predicates.scala +++ b/external-properties/src/main/scala/org/neo4j/cypher/internal/runtime/interpreted/predicates.scala @@ -63,4 +63,7 @@ case class NFOr(a: NFPredicate, b: NFPredicate) extends NFPredicate { } case class NFNot(a: NFPredicate) extends NFPredicate { +} + +case class NFConstantCachedIn(a: NFPredicate) extends NFPredicate { } \ No newline at end of file diff --git a/graiphdb-2019.iml b/graiphdb-2019.iml deleted file mode 100644 index 67da3a87..00000000 --- a/graiphdb-2019.iml +++ /dev/null @@ -1,192 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/itest/cypher-plugins.xml b/itest/cypher-plugins.xml new file mode 100644 index 00000000..d7f018f3 --- /dev/null +++ b/itest/cypher-plugins.xml @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/itest/itest/comprehensive/envirTest.properties b/itest/itest/comprehensive/envirTest.properties new file mode 100644 index 00000000..b15f3f8e --- /dev/null +++ b/itest/itest/comprehensive/envirTest.properties @@ -0,0 +1,2 @@ +zkServerAddr=10.0.86.26:2181 +clusterNodes=10.0.82.216:7685,10.0.82.217:7685,10.0.82.218:7685,10.0.82.219:7685 \ No newline at end of file diff --git a/itest/itest/performance/1230.txt 
b/itest/itest/performance/1230.txt new file mode 100644 index 00000000..f6ebfd23 --- /dev/null +++ b/itest/itest/performance/1230.txt @@ -0,0 +1,18 @@ +match (n:person)-[:write_paper]->(p:paper) where p.country = 'United States' return count(n) +match (n:person)-[:write_paper]->(p:paper) where p.country='United States' and n.citations>800 return count(n) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper) where o.country='United States' and n.citations>500 and p.citations>100 return count(n) +match (n:person)-[:write_paper]->(p:paper) where n.citations>800 and p.citation>100 return p.paperId +match (o:organization)<-[:work_for] -(n:person)-[:write_paper]->(p:paper) where n.citations>1000 and p.citation<100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for] -(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where n.citations>1000 and p2.citation<100 and o.country='United States' return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where n.citations>1000 and p2.citation<100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<5 and p.citation<100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<4 and p.citation>100 and o.longitude<130 return p.paperId +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where p.citation<100 and p2.citation>100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where p.citation<100 and p2.citation>100 and o.country='United States' return count(p2) +match (pu:publications)<-[]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where p.citation<500 and p2.citation>200 and n.nationality='United States' return count(p2) +match (n:person)-[:write_paper]->(p:paper)-[:be_cited]->(p2:paper) where p.country = 'United States' and p2.citation>500 return n.nameEn +match (n:person)-[:write_paper]->(p:paper)-[:be_cited]->(p2:paper) where n.citations>1000 and p.citation>100 and p2.citation>500 return p2.paperId +match (n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where n.citations>1000 and p.citation>100 and t.rank<3 return distinct(n.nameEn) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<5 and p.citation<100 and o.country='France' return distinct(p.paperId) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<5 and p.country='United States' and o.citations>10000000 return distinct(n.nameEn) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:be_cited]->(p2:paper) where p2.citation>400 and p.country='United States' and o.citations>10000000 return distinct(n.nameEn) \ No newline at end of file diff --git a/itest/itest/performance/cypher.txt b/itest/itest/performance/cypher.txt new file mode 100644 index 00000000..011e15c8 --- /dev/null +++ b/itest/itest/performance/cypher.txt @@ -0,0 +1,30 @@ +Match(n) Return Distinct length(labels(n)), count(length(labels(n))); +Match(n) Return Distinct labels(n), count(labels(n)); +MATCH (p:person)-[wp:write_paper]->(pa:paper) -[r:paper_belong_topic]-> (t:topic) WHERE 
toInteger(subString(pa.publishDate,0,4)) >= 1995 AND toInteger(subString(pa.publishDate,0,4)) <= 2019 AND t.topicId IN ['185592680'] AND t.rank = 1 WITH DISTINCT(pa),pa.citation AS citationOfWorld ORDER BY citationOfWorld DESC LIMIT 10000 MATCH (p:person)-[wp:write_paper]->(pa) WHERE p.nationality = 'China' RETURN DISTINCT(toInteger(subString(pa.publishDate,0,4))) AS year, COUNT(pa) AS statistics, '中国' as region Order By year; +Match(n:person) return n.publications5 Order By n.publication5 DESC; +MATCH p=(a)-[r1:be_cited]->(b)-[r2:be_cited]->(c) RETURN count(p) +MATCH p=(a)<-[r1:write_paper]-(b)-[r2:work_for]->(c) RETURN count(p); +MATCH p=(a)<-[r1:be_cited]-(b)-[r2:be_cited]->(c) RETURN count(p) +Match(n:paper) where n.language='en' and n.type='Journal' return count(n); +Match(n:person) Return n.personId Order By n.citations Desc; +Match(n:person) where n.citations>5 return distinct n.nationality, count(n) Order by count(n) desc; +Match(n:paper) where toInteger(n.publishDate)>20000000 return distinct n.country, count(n) order by count(n) Desc; +MATCH (p:person)-[wp:write_paper]->(pa:paper) -[r:paper_belong_topic]-> (t:topic) WHERE toInteger(subString(pa.publishDate,0,4)) >= 1995 AND toInteger(subString(pa.publishDate,0,4)) <= 2019 AND t.topicId IN ['185592680'] AND t.rank = 1 WITH DISTINCT(pa),pa.citation AS citationOfWorld ORDER BY citationOfWorld DESC LIMIT 10000 MATCH (p:person)-[wp:write_paper]->(pa) WHERE p.nationality = 'China' RETURN DISTINCT(toInteger(subString(pa.publishDate,0,4))) AS year, COUNT(pa) AS statistics, '中国' as region order by year UNION ALL MATCH (p:person)-[wp:write_paper]->(pa:paper) -[r:paper_belong_topic]-> (t:topic) WHERE toInteger(subString(pa.publishDate,0,4)) >= 1995 AND toInteger(subString(pa.publishDate,0,4)) <= 2019 AND (t.topicId IN ['185592680']) AND t.rank = 1 WITH DISTINCT(pa) AS pa,pa.citation AS citation ORDER BY citation DESC LIMIT 10000 RETURN DISTINCT(toInteger(subString(pa.publishDate,0,4))) AS year, COUNT(pa) AS statistics, '世界' as region order by year; +match (n:person)-[:write_paper]->(p:paper) where p.country = 'United States' return count(n) +match (n:person)-[:write_paper]->(p:paper) where p.country='United States' and n.citations>800 return count(n) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper) where o.country='United States' and n.citations>500 and p.citations>100 return count(n) +match (n:person)-[:write_paper]->(p:paper) where n.citations>800 and p.citation>100 return p.paperId +match (o:organization)<-[:work_for] -(n:person)-[:write_paper]->(p:paper) where n.citations>1000 and p.citation<100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for] -(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where n.citations>1000 and p2.citation<100 and o.country='United States' return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where n.citations>1000 and p2.citation<100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<5 and p.citation<100 and o.latitude>30 return count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<4 and p.citation>100 and o.longitude<130 return p.paperId +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where p.citation<100 and p2.citation>100 and o.latitude>30 return 
count(p) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where p.citation<100 and p2.citation>100 and o.country='United States' return count(p2) +match (pu:publications)<-[]-(n:person)-[:write_paper]->(p:paper)-[:paper_reference]->(p2:paper) where p.citation<500 and p2.citation>200 and n.nationality='United States' return count(p2) +match (n:person)-[:write_paper]->(p:paper)-[:be_cited]->(p2:paper) where p.country = 'United States' and p2.citation>500 return n.nameEn +match (n:person)-[:write_paper]->(p:paper)-[:be_cited]->(p2:paper) where n.citations>1000 and p.citation>100 and p2.citation>500 return p2.paperId +match (n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where n.citations>1000 and p.citation>100 and t.rank<3 return distinct(n.nameEn) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<5 and p.citation<100 and o.country='France' return distinct(p.paperId) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:paper_belong_topic]->(t:topic) where t.rank<5 and p.country='United States' and o.citations>10000000 return distinct(n.nameEn) +match (o:organization)<-[:work_for]-(n:person)-[:write_paper]->(p:paper)-[:be_cited]->(p2:paper) where p2.citation>400 and p.country='United States' and o.citations>10000000 return distinct(n.nameEn) \ No newline at end of file diff --git a/itest/itest/performance/cypherOnLarge b/itest/itest/performance/cypherOnLarge new file mode 100644 index 00000000..09f712ce --- /dev/null +++ b/itest/itest/performance/cypherOnLarge @@ -0,0 +1,10 @@ +Match (n:paper) where toInteger(n.publishDate)>20000000 return distinct n.paperType, count(n.paperType); +Match(n) return distinct labels(n), count(n); +Match (n)-[r:org_paper]->(m) where n.cnName contains '医院' AND m.citation>5 Return count(m); +Match(n:person) return n.chineseName Order By n.influenceScore Desc limit 25; +Match(n:person) With distinct n.chineseName as name, count(n.chineseName) as counts where counts>1 return name, counts; +Match p=(n1:dictionary_ccs)<-[r1:criterion_belong_ccs]-(n2:criterion)<-[r2:org_criterion]-(n:organization)-[r:org_criterion]->(n3:criterion)-[r3:criterion_belong_ccs]->(n4:dictionary_ccs) Where not n1.dictionaryId = n4.dictionaryId return count(n) +MAtch(n:paper) where n.paperType contains '期刊' return count(n); +Match(n:patent) where toInteger(n.awardDate) > 20180000 return count(n); +Match(n:paper_keywords) where n.times_all>1 return count(n); +Match(n:patent) where n.chineseName contains '装置' return count(n); \ No newline at end of file diff --git a/itest/itest/performance/cypher_0913_graph500 b/itest/itest/performance/cypher_0913_graph500 new file mode 100644 index 00000000..40d7010e --- /dev/null +++ b/itest/itest/performance/cypher_0913_graph500 @@ -0,0 +1,5 @@ +Match (n) where id(n)<10 with n Match p=(n)-->(m1)-->(m2) return count(p) +Match p=(m1)-->(n)<--(m2) where id(n)<10 return count(p) +Match p=(m1)-->(n)<--(m2) where id(n)<10 and id(m1)>id(m2) return count(p) +Match (n)-->(m) return distinct n.id, count(m) order by count(m) DESc limit 100 +Match (n)-->(m) return distinct m.id, count(n) order by count(n) DESc limit 100 diff --git a/itest/itest/performance/performanceConf.properties b/itest/itest/performance/performanceConf.properties new file mode 100644 index 00000000..e513b701 --- /dev/null +++ b/itest/itest/performance/performanceConf.properties @@ -0,0 +1,5 @@ +neo4jResultFile=neo4j.txt 
+PandaDBResultFile=pandadb.txt +statementsFile=cypherOnLarge +boltURI=bolt://10.0.82.220:7687 +zkServerAddr=10.0.82.216:2181,10.0.82.217:2181 \ No newline at end of file diff --git a/itest/pom.xml b/itest/pom.xml new file mode 100644 index 00000000..b832ae6b --- /dev/null +++ b/itest/pom.xml @@ -0,0 +1,56 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + itest + + + + org.scala-lang.modules + scala-parser-combinators_2.11 + + + cn.pandadb + blob-feature + ${pandadb.version} + compile + + + cn.pandadb + server + ${pandadb.version} + compile + + + cn.pandadb + tools + ${pandadb.version} + compile + + + cn.pandadb + aipm-library + ${pandadb.version} + runtime + + + org.scalatest + scalatest_${scala.compat.version} + test + + + junit + junit + + + + \ No newline at end of file diff --git a/itest/src/main/scala/cn/pandadb/itest/performance/PerformanceTests.scala b/itest/src/main/scala/cn/pandadb/itest/performance/PerformanceTests.scala new file mode 100644 index 00000000..829960dc --- /dev/null +++ b/itest/src/main/scala/cn/pandadb/itest/performance/PerformanceTests.scala @@ -0,0 +1,158 @@ +package perfomance + +import java.io.{File, FileInputStream, FileWriter} +import java.text.SimpleDateFormat +import java.util.{Date, Properties} + +import cn.pandadb.externalprops.{ExternalPropertiesContext, InElasticSearchPropertyNodeStore} +import cn.pandadb.util.GlobalContext +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.factory.GraphDatabaseFactory + +import scala.io.Source + + +trait TestBase { + + def nowDate: String = { + val now = new Date + val dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") + dateFormat.format(now) + } + +} + + +object PerformanceTests extends TestBase { + + var esNodeStores: Option[InElasticSearchPropertyNodeStore] = None + + def createPandaDB(props: Properties): GraphDatabaseService = { + var graphPath = "" + if (props.containsKey("graph.data.path")) graphPath = props.get("graph.data.path").toString + else throw new Exception("Configure File Error: graph.data.path is not exist! ") + val graphFile = new File(graphPath) + if (!graphFile.exists) throw new Exception(String.format("Error: GraphPath(%s) is not exist! ", graphPath)) + + val esHost = props.getProperty("external.properties.store.es.host") + val esPort = props.getProperty("external.properties.store.es.port").toInt + val esSchema = props.getProperty("external.properties.store.es.schema") + val esIndex = props.getProperty("external.properties.store.es.index") + val esType = props.getProperty("external.properties.store.es.type") + val esScrollSize = props.getProperty("external.properties.store.es.scroll.size", "1000").toInt + val esScrollTime = props.getProperty("external.properties.store.es.scroll.time.minutes", "10").toInt + val esNodeStore = new InElasticSearchPropertyNodeStore(esHost, esPort, esIndex, esType, esSchema, esScrollSize, esScrollTime) + ExternalPropertiesContext.bindCustomPropertyNodeStore(esNodeStore) + GlobalContext.setLeaderNode(true) + esNodeStores = Some(esNodeStore) + + new GraphDatabaseFactory().newEmbeddedDatabase(graphFile) + } + + def createNeo4jDB(props: Properties): GraphDatabaseService = { + var graphPath = "" + if (props.containsKey("graph.data.path")) graphPath = props.get("graph.data.path").toString + else throw new Exception("Configure File Error: graph.data.path is not exist! ") + val graphFile = new File(graphPath) + if (!graphFile.exists) throw new Exception(String.format("Error: GraphPath(%s) is not exist! 
", graphPath)) + + new GraphDatabaseFactory().newEmbeddedDatabase(graphFile) + } + + def main(args: Array[String]): Unit = { + + var propFilePath = "/home/bigdata/pandadb-2019/itest/testdata/performance-test.conf" // null; + if (args.length > 0) propFilePath = args(0) + val props = new Properties + props.load(new FileInputStream(new File(propFilePath))) + + var testDb = "neo4j" + var graphPath = "" + var logFileDir = "" + var cyhperFilePath = "" + + if (props.containsKey("test.db")) testDb = props.get("test.db").toString.toLowerCase() + else throw new Exception("Configure File Error: test.db is not exist! ") + + if (props.containsKey("graph.data.path")) graphPath = props.get("graph.data.path").toString + else throw new Exception("Configure File Error: graph.data.path is not exist! ") + + if (props.containsKey("log.file.dir")) logFileDir = props.get("log.file.dir").toString + else throw new Exception("Configure File Error: log.file.dir is not exist! ") + + if (props.containsKey("test.cyhper.path")) cyhperFilePath = props.get("test.cyhper.path").toString + else throw new Exception("Configure File Error: test.cyhper.path is not exist! ") + + val logDir: File = new File(logFileDir) + if (!logDir.exists()) { + logDir.mkdirs + println("make log dir") + } + val logFileName = new SimpleDateFormat("MMdd-HHmmss").format(new Date) + ".log" + val logFile = new File(logDir, logFileName) + + println("Neo4j Test") + println(s"GraphDataPath: ${graphPath} \n LogFilePath: ${logFile.getAbsolutePath}") + + val logFw = new FileWriter(logFile) + logFw.write(s"GraphDataPath: $graphPath \n") + val cyhpers = readCyphers(cyhperFilePath) + var db: GraphDatabaseService = null + + if (testDb.equals("neo4j")) { + println(s"testDB: neo4j \n") + logFw.write(s"testDB: neo4j \n") + db = createNeo4jDB(props) + } + else if (testDb.equals("pandadb")) { + println(s"testDB: pandadb \n") + logFw.write(s"testDB: pandadb \n") + db = createPandaDB(props) + } + + if (db == null) { + throw new Exception("DB is null") + } + + println("==== begin tests ====") + val beginTime = nowDate + println(beginTime) + + try { + var i = 0 + cyhpers.foreach(cyhper => { + i += 1 + val tx = db.beginTx() + val mills0 = System.currentTimeMillis() + val res = db.execute(cyhper) + val useMills = System.currentTimeMillis() - mills0 + tx.close() + println(s"$i, $useMills") + logFw.write(s"\n====\n$cyhper\n") + logFw.write(s"UsedTime(ms): $useMills \n") + logFw.flush() + }) + } + finally { + logFw.close() + if (testDb == "pandadb" && esNodeStores.isDefined) { + esNodeStores.get.esClient.close() + } + db.shutdown() + } + + println("==== end tests ====") + val endTime = nowDate + println("Begin Time: " + beginTime) + println("End Time: " + endTime) + + } + + def readCyphers(filePath: String): Iterable[String] = { + val source = Source.fromFile(filePath, "UTF-8") + val lines = source.getLines().toArray + source.close() + lines + } + +} diff --git a/itest/src/test/resources/clusterLog.json b/itest/src/test/resources/clusterLog.json new file mode 100644 index 00000000..355bd5ca --- /dev/null +++ b/itest/src/test/resources/clusterLog.json @@ -0,0 +1 @@ +{"dataLog":[{"versionNum":1,"command":"Create(n:Test{version:1})"},{"versionNum":2,"command":"Create(n:Test{version:2})"},{"versionNum":3,"command":"Create(n:Test{version:3})"}]} \ No newline at end of file diff --git a/itest/src/test/resources/localLog.json b/itest/src/test/resources/localLog.json new file mode 100644 index 00000000..b9beaeaa --- /dev/null +++ b/itest/src/test/resources/localLog.json @@ -0,0 +1 
@@ +{"dataLog":[{"versionNum":1,"command":"Create(n:Test{version:1})"},{"versionNum":2,"command":"Create(n:Test{version:2})"}]} \ No newline at end of file diff --git a/src/test/resources/log4j.properties b/itest/src/test/resources/log4j.properties similarity index 94% rename from src/test/resources/log4j.properties rename to itest/src/test/resources/log4j.properties index 7fd556b6..f9f8ea82 100644 --- a/src/test/resources/log4j.properties +++ b/itest/src/test/resources/log4j.properties @@ -5,7 +5,7 @@ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%d{HH:mm:ss:SSS}] %-5p %-20c{1} :: %m%n #log4j.logger.org.neo4j.values.storable=DEBUG -log4j.logger.org.neo4j=WARN +log4j.logger.org.neo4j=DEBUG log4j.logger.org.neo4j.server=DEBUG log4j.logger.cn=DEBUG log4j.logger.org=WARN diff --git a/itest/src/test/scala/PandaDBTestBase.scala b/itest/src/test/scala/PandaDBTestBase.scala new file mode 100644 index 00000000..c94b3b34 --- /dev/null +++ b/itest/src/test/scala/PandaDBTestBase.scala @@ -0,0 +1,74 @@ +import java.io.{File, FileInputStream} +import java.util.Properties +import java.util.concurrent.{ExecutorService, Executors} + +import cn.pandadb.network.NodeAddress +import cn.pandadb.tool.PNodeServerStarter +import org.junit.Test +import org.neo4j.driver.{Driver, GraphDatabase, StatementResult} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 12:04 2019/12/26 + * @Modified By: + */ +abstract class PandaDBTestBase { + var serialNum = 0 + val threadPool: ExecutorService = Executors.newFixedThreadPool(3) + +// def startLocalPNodeServer(): NodeAddress = { +// val startCmd = s"cmd.exe /c mvn exec:java -Dexec.mainClass='cn.pandadb.tool.UnsafePNodeLauncher' -Dexec.args=${serialNum}" +// startCmd !!; +// val localNodeAddress = _getLocalNodeAddressFromFile(new File(s"./itest/testdata/localnode${serialNum}.conf")) +// serialNum += 1; +// Thread.sleep(10000) +// localNodeAddress +// } + + def standAloneLocalPNodeServer(): NodeAddress = { + threadPool.execute(new UnsafePNodeThread(serialNum)) + print(22222) + Thread.sleep(10000) + val localNodeAddress = _getLocalNodeAddressFromFile(new File(s"./testdata/localnode${serialNum}.conf")) + serialNum += 1 + localNodeAddress + } + + def executeCypher(driver: Driver, cypher: String): StatementResult = { + val session = driver.session() + val tx = session.beginTransaction() + val result = tx.run(cypher) + tx.success() + tx.close() + session.close() + result + } + // For base test's use. 
+ private def _getLocalNodeAddressFromFile(confFile: File): NodeAddress = { + val props = new Properties() + props.load(new FileInputStream(confFile)) + NodeAddress.fromString(props.getProperty("node.server.address")) + } + +} + +object ExampleText extends PandaDBTestBase { + val driver = GraphDatabase.driver(s"bolt://${standAloneLocalPNodeServer().getAsString}") +} +class ExampleText extends PandaDBTestBase { + @Test + def test1(): Unit = { + val localNodeAddress = standAloneLocalPNodeServer() + val cypher = "" + val result = executeCypher(ExampleText.driver, cypher) + } +} + +class UnsafePNodeThread(num: Int) extends Runnable{ + override def run(): Unit = { + //scalastyle:off + PNodeServerStarter.main(Array(s"./output/testdb/db${num}", + s"./testdata/localnode${num}.conf")); + } +} \ No newline at end of file diff --git a/itest/src/test/scala/cypher-plus/BlobValueTest.scala b/itest/src/test/scala/cypher-plus/BlobValueTest.scala new file mode 100644 index 00000000..40b49b04 --- /dev/null +++ b/itest/src/test/scala/cypher-plus/BlobValueTest.scala @@ -0,0 +1,218 @@ +///* +// * Copyright (c) 2002-2019 "Neo4j," +// * Neo4j Sweden AB [http://neo4j.com] +// * +// * This file is part of Neo4j. +// * +// * Neo4j is free software: you can redistribute it and/or modify +// * it under the terms of the GNU General Public License as published by +// * the Free Software Foundation, either version 3 of the License, or +// * (at your option) any later version. +// * +// * This program is distributed in the hope that it will be useful, +// * but WITHOUT ANY WARRANTY; without even the implied warranty of +// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// * GNU General Public License for more details. +// * +// * You should have received a copy of the GNU General Public License +// * along with this program. If not, see . 
+// */ +// +//import java.io.{File, FileInputStream} +//import java.net.URL +// +//import cn.pandadb.blob.Blob +//import cn.pandadb.driver.RemotePanda +//import cn.pandadb.server.PNodeServer +//import org.apache.commons.io.IOUtils +//import org.neo4j.driver._ +//import org.scalatest.{BeforeAndAfter, FunSuite} +// +//class BlobValueTest extends FunSuite with BeforeAndAfter with TestBase { +// var server: PNodeServer = _; +// +// before { +// setupNewDatabase(new File("./output/testdb/data/databases/graph.db")); +// server = PNodeServer.startServer(testDbDir, new File(testConfPath)); +// } +// +// after { +// server.shutdown() +// } +// +// test("test blob R/W via cypher") { +// val conn = RemotePanda.connect("bolt://localhost:7687"); +// //a non-blob +// val (node, name, age) = conn.querySingleObject("match (n) where n.name='bob' return n, n.name, n.age", (result: Record) => { +// (result.get("n").asNode(), result.get("n.name").asString(), result.get("n.age").asInt()) +// }); +// +// assert("bob" === name); +// assert(40 == age); +// +// val nodes = conn.queryObjects("match (n) return n", (result: Record) => { +// result.get("n").asNode() +// }); +// +// assert(2 == nodes.length); +// +// //blob +// val blob0 = conn.querySingleObject("return Blob.empty()", (result: Record) => { +// result.get(0).asBlob +// }); +// +// assert(0 == blob0.length); +// +// conn.querySingleObject("return Blob.fromFile('./testdata/test.png')", (result: Record) => { +// val blob1 = result.get(0).asBlob +// assert(new File("./testdata/test.png").length() == blob1.toBytes().length); +// blob1.offerStream(is => { +// //remote input stream should be closed +// is.read(); +// }) +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === +// blob1.toBytes()); +// 1; +// }); +// +// var blob20: Blob = null; +// +// conn.querySingleObject("match (n) where n.name='bob' return n.photo,n.album,Blob.len(n.photo) as len", (result: Record) => { +// val blob2 = result.get("n.photo").asBlob; +// blob20 = blob2; +// val album = result.get("n.album").asList(); +// val len = result.get("len").asInt() +// +// assert(len == new File("./testdata/test.png").length()); +// +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === +// blob2.offerStream { +// IOUtils.toByteArray(_) +// }); +// +// assert(6 == album.size()); +// +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === +// album.get(0).asInstanceOf[Blob].offerStream { +// IOUtils.toByteArray(_) +// }); +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === +// album.get(5).asInstanceOf[Blob].offerStream { +// IOUtils.toByteArray(_) +// }); +// }); +// +// //now, blob is unaccessible +// val ex = +// try { +// blob20.offerStream { +// IOUtils.toByteArray(_) +// }; +// +// false; +// } +// catch { +// case _ => true; +// } +// +// assert(ex); +// +// conn.querySingleObject("match (n) where n.name='alex' return n.photo", (result: Record) => { +// val blob3 = result.get("n.photo").asBlob +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test1.png"))) === +// blob3.offerStream { +// IOUtils.toByteArray(_) +// }); +// }); +// +// //query with parameters +// val blob4 = conn.querySingleObject("match (n) where n.name={NAME} return n.photo", +// Map("NAME" -> "bob"), (result: Record) => { +// result.get("n.photo").asBlob +// }); +// +// //commit new records +// conn.executeUpdate("CREATE (n {name:{NAME}})", +// Map("NAME" -> "张三")); +// 
+// conn.executeUpdate("CREATE (n {name:{NAME}, photo:{BLOB_OBJECT}})", +// Map("NAME" -> "张三", "BLOB_OBJECT" -> Blob.EMPTY)); +// +// conn.executeUpdate("CREATE (n {name:{NAME}, photo:{BLOB_OBJECT}})", +// Map("NAME" -> "张三", "BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test1.png")))); +// +// conn.executeQuery("return {BLOB_OBJECT}", +// Map("BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test.png")))); +// +// conn.querySingleObject("return {BLOB_OBJECT}", +// Map("BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test.png"))), (result: Record) => { +// val blob = result.get(0).asBlob +// +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === +// blob.offerStream { +// IOUtils.toByteArray(_) +// }); +// +// }); +// } +// +// test("test blob R/W via blob literal") { +// val conn = RemotePanda.connect("bolt://localhost:7687"); +// +// //blob +// val blob0 = conn.querySingleObject("return ", (result: Record) => { +// result.get(0).asBlob +// }); +// +// assert(0 == blob0.length); +// +// //blob +// val blob01 = conn.querySingleObject("return ", (result: Record) => { +// result.get(0).asBlob +// }); +// +// assert("this is an example".getBytes() === blob01.toBytes()); +// +// //test localfile +// conn.querySingleObject("return ", (result: Record) => { +// val blob1 = result.get(0).asBlob +// +// assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === +// blob1.toBytes()); +// }); +// +// //test http +// conn.querySingleObject("return ", (result: Record) => { +// val blob2 = result.get(0).asBlob +// assert(IOUtils.toByteArray(new URL("https://www.baidu.com/img/baidu_jgylogo3.gif")) === +// blob2.toBytes()); +// }); +// +// //test large files +// conn.querySingleObject("return ", (result: Record) => { +// val blob2 = result.get(0).asBlob +// assert(blob2.length > 10240) +// val bs = blob2.toBytes() +// assert(blob2.length === bs.length) +// +// val bs2 = IOUtils.toByteArray(new URL("http://img.mp.itc.cn/upload/20160512/5a4ccef302664806bb679a29c82209c5.jpg")) +// println(bs2.toList) +// println(bs.toList) +// +// assert(bs2.length === bs.length) +// assert(bs2 === bs); +// }); +// +// //test https +// val blob3 = conn.querySingleObject("return ", (result: Record) => { +// result.get(0).asBlob.toBytes() +// }); +// +// assert(IOUtils.toByteArray(new URL("https://avatars0.githubusercontent.com/u/2328905?s=460&v=4")) === +// blob3); +// +// assert(conn.querySingleObject("return Blob.len()", (result: Record) => { +// result.get(0).asInt +// }) == new File("./testdata/test.png").length()); +// } +//} diff --git a/itest/src/test/scala/cypher-plus/SemOpTest.scala b/itest/src/test/scala/cypher-plus/SemOpTest.scala new file mode 100644 index 00000000..70e818b4 --- /dev/null +++ b/itest/src/test/scala/cypher-plus/SemOpTest.scala @@ -0,0 +1,114 @@ +//import java.io.File +// +//import cn.pandadb.server.PNodeServer +//import org.apache.commons.io.FileUtils +//import org.junit.{Assert, Test} +// +//class SemOpTest extends TestBase { +// @Test +// def testLike(): Unit = { +// //create a new database +// val db = openDatabase(); +// val tx = db.beginTx(); +// +// Assert.assertEquals(true, db.execute("return Blob.empty() ~:0.5 Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); +// Assert.assertEquals(true, db.execute("return Blob.empty() ~:0.5 Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); +// Assert.assertEquals(true, db.execute("return Blob.empty() ~:1.0 Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); 
+// +// Assert.assertEquals(true, db.execute("return Blob.empty() ~: Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); +// +// Assert.assertEquals(true, db.execute( +// """return Blob.fromFile('./testdata/mayun1.jpeg') +// ~: Blob.fromFile('./testdata/mayun2.jpeg') as r""") +// .next().get("r").asInstanceOf[Boolean]); +// +// Assert.assertEquals(false, db.execute( +// """return Blob.fromFile('./testdata/mayun1.jpeg') +// ~: Blob.fromFile('./testdata/lqd.jpeg') as r""") +// .next().get("r").asInstanceOf[Boolean]); +// +// Assert.assertEquals(true, db.execute("""return Blob.fromFile('./testdata/car1.jpg') ~: '.*NB666.*' as r""") +// .next().get("r").asInstanceOf[Boolean]); +// +// tx.success(); +// tx.close(); +// db.shutdown(); +// } +// +// @Test +// def testCompare(): Unit = { +// //create a new database +// val db = openDatabase(); +// val tx = db.beginTx(); +// +// try { +// Assert.assertEquals(1.toLong, db.execute("return 1 :: 2 as r").next().get("r")); +// Assert.assertTrue(false); +// } +// catch { +// case _: Throwable => Assert.assertTrue(true); +// } +// +// Assert.assertEquals(true, +// db.execute("return :: as r").next().get("r").asInstanceOf[Double] > 0.7); +// Assert.assertEquals(true, +// db.execute("return :: as r").next().get("r").asInstanceOf[Double] > 0.6); +// Assert.assertEquals(true, +// db.execute("return '杜 一' :: '杜一' > 0.6 as r").next().get("r")); +// Assert.assertEquals(true, +// db.execute("return '杜 一' ::jaro '杜一' > 0.6 as r").next().get("r")); +// +// db.execute("return '杜 一' ::jaro '杜一','Zhihong SHEN' ::levenshtein 'SHEN Z.H'"); +// +// tx.success(); +// tx.close(); +// db.shutdown(); +// } +// +// @Test +// def testCustomProperty1(): Unit = { +// //create a new database +// val db = openDatabase(); +// val tx = db.beginTx(); +// +// Assert.assertEquals(new File("./testdata/car1.jpg").length(), +// db.execute("""return Blob.fromFile('./testdata/car1.jpg')->length as x""") +// .next().get("x")); +// +// Assert.assertEquals("image/jpeg", db.execute("""return Blob.fromFile('./testdata/car1.jpg')->mime as x""") +// .next().get("x")); +// +// Assert.assertEquals(500, db.execute("""return Blob.fromFile('./testdata/car1.jpg')->width as x""") +// .next().get("x")); +// +// Assert.assertEquals(333, db.execute("""return Blob.fromFile('./testdata/car1.jpg')->height as x""") +// .next().get("x")); +// +// Assert.assertEquals(333, db.execute("""return ->height as x""") +// .next().get("x")); +// +// Assert.assertEquals(null, db.execute("""return Blob.fromFile('./testdata/car1.jpg')->notExist as x""") +// .next().get("x")); +// +// tx.success(); +// tx.close(); +// db.shutdown(); +// } +// +// @Test +// def testCustomProperty2(): Unit = { +// //create a new database +// val db = openDatabase(); +// val tx = db.beginTx(); +// +// Assert.assertEquals("苏E730V7", db.execute("""return Blob.fromFile('./testdata/car1.jpg')->plateNumber as r""") +// .next().get("r")); +// +// Assert.assertEquals("我今天早上吃了两个包子", db.execute("""return Blob.fromFile('./testdata/test.wav')->message as r""") +// .next().get("r").asInstanceOf[Boolean]); +// +// tx.success(); +// tx.close(); +// db.shutdown(); +// } +//} \ No newline at end of file diff --git a/itest/src/test/scala/cypher-plus/TestBase.scala b/itest/src/test/scala/cypher-plus/TestBase.scala new file mode 100644 index 00000000..942e6032 --- /dev/null +++ b/itest/src/test/scala/cypher-plus/TestBase.scala @@ -0,0 +1,42 @@ +//import java.io.File +// +//import cn.pandadb.blob.Blob +//import org.apache.commons.io.FileUtils +//import 
org.neo4j.graphdb.GraphDatabaseService +// +///** +// * Created by bluejoe on 2019/4/13. +// */ +//trait TestBase { +// val testDbDir = new File("./output/testdb"); +// val testConfPath = new File("./testdata/neo4j.conf").getPath; +// +// def setupNewDatabase(dbdir: File = testDbDir, conf: String = testConfPath): Unit = { +// FileUtils.deleteDirectory(dbdir); +// //create a new database +// val db = openDatabase(dbdir, conf); +// val tx = db.beginTx(); +// //create a node +// val node1 = db.createNode(); +// +// node1.setProperty("name", "bob"); +// node1.setProperty("age", 40); +// +// //with a blob property +// node1.setProperty("photo", Blob.fromFile(new File("./testdata/test.png"))); +// //blob array +// node1.setProperty("album", (0 to 5).map(x => Blob.fromFile(new File("./testdata/test.png"))).toArray); +// +// val node2 = db.createNode(); +// node2.setProperty("name", "alex"); +// //with a blob property +// node2.setProperty("photo", Blob.fromFile(new File("./testdata/test1.png"))); +// node2.setProperty("age", 10); +// +// //node2.createRelationshipTo(node1, RelationshipType.withName("dad")); +// +// tx.success(); +// tx.close(); +// db.shutdown(); +// } +//} diff --git a/itest/src/test/scala/distributed/DistributedDataRecoverTest.scala b/itest/src/test/scala/distributed/DistributedDataRecoverTest.scala new file mode 100644 index 00000000..df3547c4 --- /dev/null +++ b/itest/src/test/scala/distributed/DistributedDataRecoverTest.scala @@ -0,0 +1,156 @@ +package distributed + +import java.util.concurrent.{ExecutorService, Executors} + +import cn.pandadb.network.ZKPathConfig +import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import org.apache.curator.retry.ExponentialBackoffRetry +import org.junit.{Assert, BeforeClass, Test} +import org.neo4j.driver.{AuthTokens, Driver, GraphDatabase, StatementResult} +import DistributedDataRecoverTest.{localPNodeServer0, localPNodeServer1, neoDriver0, neoDriver1, pandaDriver, threadPool} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 17:37 2019/12/4 + * @Modified By: + */ + +object DistributedDataRecoverTest { + @BeforeClass + val zkString = "10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181" + val pandaString = s"panda://" + zkString + "/db" + val pandaDriver = GraphDatabase.driver(pandaString, AuthTokens.basic("", "")) + val curator: CuratorFramework = CuratorFrameworkFactory.newClient(zkString, + new ExponentialBackoffRetry(1000, 3)); + curator.start() + + val node0 = "159.226.193.204:7684" + val zkMasterPath = ZKPathConfig.leaderNodePath + s"/${node0}" + + val node1 = "159.226.193.204:7685"; + val zkSlavePath = ZKPathConfig.ordinaryNodesPath + s"/${node1}" + + val neoDriver0 = GraphDatabase.driver(s"bolt://${node0}") + val neoDriver1 = GraphDatabase.driver(s"bolt://${node1}") + + val localPNodeServer0 = new LocalServerThread(0) + val localPNodeServer1 = new LocalServerThread(1) + val threadPool: ExecutorService = Executors.newFixedThreadPool(2) +} + +class DistributedDataRecoverTest { + + val time = "12:41" + val time2 = "12:42" + val time3 = "12:43" + + // only start node0 + @Test + def test1(): Unit = { + // no data in cluster + threadPool.execute(localPNodeServer0) + Thread.sleep(10000) + + _executeStatement(neoDriver0, "Match(n) Delete n;") + val result = pandaDriver.session().run("Match(n) Return n;") + Assert.assertEquals(false, result.hasNext) + + // create a node + _executeStatement(pandaDriver, s"Create(n:Test{time:'${time}'});") + + // query by panda driver + val clusterResult = 
_executeStatement(pandaDriver, "Match(n) Return n;") + Assert.assertEquals(time, clusterResult.next().get("n").get("time").asString()) + + // query by neo4j driver + val nResult1 = _executeStatement(neoDriver0, "Match(n) Return n;") + Assert.assertEquals(time.toString, nResult1.next().get("n").get("time").asString()) + + // slave node hasn't started. + val exists = DistributedDataRecoverTest.curator.checkExists() + .forPath(DistributedDataRecoverTest.zkSlavePath) + Assert.assertEquals(null, exists) + // show the master dataVersionLog + + // slave data is updated + threadPool.execute(localPNodeServer1) + Thread.sleep(15000) +// val slaveResult = neoDriver1.session.run("Match(n) Return n;") +// Assert.assertEquals(time, slaveResult.next().get("n.time").asString()) + } + + // run the slave node here +// @Test +// def test2(): Unit = { +// +// } + +// @Test +// def test3(): Unit = { +// // create a new node +// DistributedDataRecoverTest.driver.session() +// .run(s"Create(n:Test2{time2:'${time2}'})") +// +// // only one node created. +// val clusterResult = DistributedDataRecoverTest.driver.session() +// .run(s"Match(n) Where n.time2='${time2}'") +// Assert.assertEquals(true, clusterResult.hasNext) +// Assert.assertEquals(time2, clusterResult.next().get("n.time2")) +// Assert.assertEquals(false, clusterResult.hasNext) +// } +// +// // close slave here. +// @Test +// def test4(): Unit = { +// DistributedDataRecoverTest.driver.session().run(s"Match(n) Delete n;") +// DistributedDataRecoverTest.driver.session().run(s"Create(n:Test3);") +// DistributedDataRecoverTest.driver.session().run(s"Match(n) Set n.time3 = '${time3}';") +// +// val clusterResult = DistributedDataRecoverTest.driver.session().run(s"Match(n) Return n;") +// Assert.assertEquals(time3, clusterResult.next().get("n.time3")) +// } +// +// // close master here. +// // no server in the cluster now +// // start slave here. +// @Test +// def test5(): Unit = { +// Assert.assertEquals(null, +// DistributedDataRecoverTest.curator.checkExists().forPath(DistributedDataRecoverTest.zkSlavePath)) +// +// Assert.assertEquals(null, +// DistributedDataRecoverTest.curator.checkExists().forPath(DistributedDataRecoverTest.zkMasterPath)) +// } +// +// // start master here. 
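+// // Note: tests 2-6 appear to depend on manually starting and stopping the
+// // master/slave PNode servers between steps, which is presumably why they
+// // are kept commented out.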
+// @Test +// def test6(): Unit = { +// +// Assert.assertEquals(false, +// DistributedDataRecoverTest.curator.checkExists().forPath(DistributedDataRecoverTest.zkSlavePath) == null) +// +// Assert.assertEquals(false, +// DistributedDataRecoverTest.curator.checkExists().forPath(DistributedDataRecoverTest.zkMasterPath) == null) +// +// val clusterResult = DistributedDataRecoverTest.driver.session().run(s"Match(n) Return n;") +// Assert.assertEquals(time3, clusterResult.next().get("n.time3")) +// +// val masterResult = GraphDatabase.driver(DistributedDataRecoverTest.node0).session().run(s"Match(n) Return n;") +// Assert.assertEquals(time3, masterResult.next().get("n.time3")) +// +// val slaveResult = GraphDatabase.driver(DistributedDataRecoverTest.node0).session().run(s"Match(n) Return n;") +// Assert.assertEquals(time3, slaveResult.next().get("n.time3")) +// } + + private def _executeStatement(driver: Driver, cypher: String): StatementResult = { + val session = driver.session() + val tx = session.beginTransaction() + val result = tx.run(cypher) + tx.success() + tx.close() + session.close() + result + } + +} diff --git a/itest/src/test/scala/distributed/DriverTest.scala b/itest/src/test/scala/distributed/DriverTest.scala new file mode 100644 index 00000000..44f9302d --- /dev/null +++ b/itest/src/test/scala/distributed/DriverTest.scala @@ -0,0 +1,102 @@ +package distributed + +import java.io.{File, FileInputStream} +import java.util.Properties + +import org.junit.{Assert, Test} +import org.neo4j.driver.{AuthTokens, GraphDatabase, Transaction, TransactionWork} + +/** + * Created by bluejoe on 2019/11/21. + */ +class DriverTest { + + val configFile = new File("./testdata/gnode0.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val pandaString = s"panda://" + props.getProperty("zkServerAddress") + s"/db" + + @Test + def test0() { + val driver = GraphDatabase.driver(pandaString, + AuthTokens.basic("", "")); + var results1 = driver.session().run("create (n:person{name:'bluejoe'})"); + val results = driver.session().run("match (n:person) return n"); + + val result = results.next(); + Assert.assertEquals("bluejoe", result.get("n").asNode().get("name").asString()); + val results2 = driver.session().run("match (n:person) delete n"); + driver.close(); + } + @Test + def test1() { + val driver = GraphDatabase.driver(pandaString, + AuthTokens.basic("", "")); + val session = driver.session(); + var results1 = session.run("create (n:person{name:'bluejoe'})"); + val results = session.run("match (n:person) return n"); + val result = results.next(); + Assert.assertEquals("bluejoe", result.get("n").asNode().get("name").asString()); + val results2 = session.run("match (n:person) delete n"); + session.close(); + driver.close(); + } + + //test transaction + @Test + def test2() { + val driver = GraphDatabase.driver(pandaString, + AuthTokens.basic("", "")); + val session = driver.session(); + val transaction = session.beginTransaction() + var results1 = transaction.run("create (n:person{name:'bluejoe'})"); + + results1 = transaction.run("create (n:people{name:'lin'})"); + val results = transaction.run("match (n:person) return n.name"); + + val result = results.next(); + Assert.assertEquals("bluejoe", result.get("n.name").asString()); + + val results3 = transaction.run("match (n) return n.name"); + Assert.assertEquals(2, results3.list().size()) + + val results2 = transaction.run("match (n) delete n"); + + Assert.assertEquals(0, results2.list().size()) + + transaction.success() + 
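+    // success() only marks the transaction as committable; the actual commit
+    // happens when transaction.close() is called below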
transaction.close() + session.close(); + driver.close(); + } + + @Test + def test3() { + val driver = GraphDatabase.driver(pandaString, + AuthTokens.basic("", "")); + var session = driver.session() + + val result = session.writeTransaction(new TransactionWork[Unit] { + override def execute(transaction: Transaction): Unit = { + val res1 = transaction.run("create (n:person{name:'bluejoe'})") + } + }) + //session = driver.session() + val result2 = session.readTransaction(new TransactionWork[Unit] { + override def execute(transaction: Transaction): Unit = { + val res2 = transaction.run("match (n:person) return n.name") + Assert.assertEquals("bluejoe", res2.next().get("n.name").asString()); + } + }) + //session = driver.session() + val result3 = session.writeTransaction(new TransactionWork[Unit] { + override def execute(transaction: Transaction): Unit = { + val res3 = transaction.run("match (n) delete n") + //Assert.assertEquals("bluejoe", res2.next().get("name").asString()); + } + }) + session.close(); + driver.close(); + } + +} diff --git a/itest/src/test/scala/distributed/LocalDataVersionRecoveryTest.scala b/itest/src/test/scala/distributed/LocalDataVersionRecoveryTest.scala new file mode 100644 index 00000000..2145a5b3 --- /dev/null +++ b/itest/src/test/scala/distributed/LocalDataVersionRecoveryTest.scala @@ -0,0 +1,71 @@ +package distributed + +import java.io.{File, FileInputStream} +import java.util.Properties +import java.util.concurrent.{ExecutorService, Executors} + +import cn.pandadb.network.NodeAddress +import cn.pandadb.server.{DataVersionRecoveryArgs, LocalDataVersionRecovery} +import distributed.LocalDataVersionRecoveryTest.{neodriver, recovery} +import org.junit.{Assert, BeforeClass, Test} +import org.neo4j.driver.{Driver, GraphDatabase} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 16:18 2019/12/3 + * @Modified By: + */ +object LocalDataVersionRecoveryTest { + + val localLogFile = new File("./src/test/resources/localLog.json") + val clusterLogFile = new File("./src/test/resources/clusterLog.json") + + val localPNodeServer = new LocalServerThread(0) + + val confFile = new File("../itest/testdata/localnode0.conf") + val props = new Properties() + props.load(new FileInputStream(confFile)) + val localNodeAddress = NodeAddress.fromString(props.getProperty("node.server.address")) + + val recoveryArgs = DataVersionRecoveryArgs(localLogFile, clusterLogFile, localNodeAddress) + val recovery = new LocalDataVersionRecovery(recoveryArgs) + + val neodriver: Driver = { + GraphDatabase.driver(s"bolt://" + localNodeAddress.getAsString) + } + + @BeforeClass + private def _startServer(): Unit = { + val threadPool: ExecutorService = Executors.newFixedThreadPool(1) + threadPool.execute(localPNodeServer) + Thread.sleep(10000) + } + _startServer() + +} + +class LocalDataVersionRecoveryTest { + + @Test + def test1(): Unit = { + val _session = neodriver.session() + val beforeResult = _session.run("Match(n) Return(n)") + Assert.assertEquals(false, beforeResult.hasNext) + _session.close() + } + + @Test + def test2(): Unit = { + recovery.updateLocalVersion() + val _session = neodriver.session() + val afterResult = _session.run("Match(n) return n;") + Assert.assertEquals(true, afterResult.hasNext) + + while (afterResult.hasNext) { + Assert.assertEquals(3.toInt, afterResult.next().get("n").asMap().get("version").toString.toInt) + } + _session.run("Match(n) Delete n") + } + +} diff --git a/itest/src/test/scala/distributed/LocalMultiThreadLauncher.scala 
b/itest/src/test/scala/distributed/LocalMultiThreadLauncher.scala new file mode 100644 index 00000000..a9da2023 --- /dev/null +++ b/itest/src/test/scala/distributed/LocalMultiThreadLauncher.scala @@ -0,0 +1,18 @@ +package distributed + +import java.io.File + +import cn.pandadb.server.PNodeServer +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 8:59 2019/12/24 + * @Modified By: + */ + +class LocalServerThread(num: Int) extends Runnable { + override def run(): Unit = { + PNodeServer.startServer(new File(s"../itest/output/testdb/db${num}"), + new File(s"../itest/testdata/localnode${num}.conf")) + } +} \ No newline at end of file diff --git a/itest/src/test/scala/distributed/PNodeServerStarterTest.scala b/itest/src/test/scala/distributed/PNodeServerStarterTest.scala new file mode 100644 index 00000000..2856128e --- /dev/null +++ b/itest/src/test/scala/distributed/PNodeServerStarterTest.scala @@ -0,0 +1,14 @@ +package distributed + +import cn.pandadb.tool.PNodeServerStarter + +/** + * Created by bluejoe on 2019/11/24. + */ +object PNodeServerStarterTest { + def main(args: Array[String]) { + val num = 0 + PNodeServerStarter.main(Array(s"./itest/output/testdb/db${num}", + s"./itest/testdata/localnode${num}.conf")); + } +} \ No newline at end of file diff --git a/itest/src/test/scala/distributed/PandaDriverTest.scala b/itest/src/test/scala/distributed/PandaDriverTest.scala new file mode 100644 index 00000000..d6f17969 --- /dev/null +++ b/itest/src/test/scala/distributed/PandaDriverTest.scala @@ -0,0 +1,175 @@ +package distributed + +import java.io.{File, FileInputStream} +import java.util.{Locale, Properties} + +import cn.pandadb.driver.PandaDriver +import cn.pandadb.network.ZKPathConfig +import distributed.PandaDriverTest.{neoDriver0, neoDriver1, pandaDriver} +import org.apache.curator.framework.CuratorFrameworkFactory +import org.apache.curator.retry.ExponentialBackoffRetry +import org.junit.runners.MethodSorters +import org.junit._ +import org.neo4j.driver.{AuthTokens, GraphDatabase} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 16:58 2019/12/7 + * @Modified By: + */ + +// todo: test n.prop +object PandaDriverTest { + val configFile = new File("./testdata/gnode0.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val pandaString = s"panda://" + props.getProperty("zkServerAddress") + s"/db" + + ZKPathConfig.initZKPath(props.getProperty("zkServerAddress")) + // correct these two addresses please + val node0 = "bolt://localhost:7684" + val node1 = "bolt://localhost:7685" + + val pandaDriver = GraphDatabase.driver(pandaString, AuthTokens.basic("", "")) + val neoDriver0 = GraphDatabase.driver(node0, AuthTokens.basic("", "")) + val neoDriver1 = GraphDatabase.driver(node1, AuthTokens.basic("", "")) + + @BeforeClass + def deleteAllData(): Unit = { + pandaDriver.session().run("Match(n) Delete n;") + Thread.sleep(1500) + } + + @AfterClass + def deleteVersion(): Unit = { + pandaDriver.session().run("Match(n) Delete n;") + val curator = CuratorFrameworkFactory.newClient(props.getProperty("zkServerAddress"), + new ExponentialBackoffRetry(1000, 3)) + curator.start() + curator.delete().forPath("/testPandaDB/version") + curator.close() + } +} + +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +class PandaDriverTest { + + // check the type + @Test + def test0(): Unit = { + // scalastyle:off + Assert.assertEquals(true, pandaDriver.isInstanceOf[PandaDriver]) + Assert.assertEquals("class org.neo4j.driver.internal.InternalDriver", 
neoDriver0.getClass.toString) + Assert.assertEquals("class org.neo4j.driver.internal.InternalDriver", neoDriver1.getClass.toString) + } + + // make sure the database is blank + @Test + def test1(): Unit = { + val session = pandaDriver.session() + val tx = session.beginTransaction() + tx.run("Match(n) Delete n;") + tx.success() + tx.close() + session.close() + val clusterResult = pandaDriver.session().run("Match(n) Return n;") + val node0Result = neoDriver0.session().run("Match(n) Return n;") + val node1Result = neoDriver1.session().run("Match(n) Return n;") + Assert.assertEquals(false, clusterResult.hasNext) + Assert.assertEquals(false, node0Result.hasNext) + Assert.assertEquals(false, node1Result.hasNext) + } + + @Test + def test2(): Unit = { + val session = pandaDriver.session() + val tx = session.beginTransaction() + tx.run("Create(n:Test{prop:'panda'})") + tx.success() + tx.close() + session.close() + _createAndMerge() + } + + @Test + def test3(): Unit = { + val session = pandaDriver.session() + val tx = session.beginTransaction() + tx.run("Merge(n:Test{prop:'panda'})") + tx.success() + tx.close() + session.close() + _createAndMerge() + } + + @Test + def test4(): Unit = { + + val session = pandaDriver.session() + val tx = session.beginTransaction() + tx.run("Create(n:Test{prop:'panda'})") + tx.success() + tx.close() + session.close() + + val session1 = pandaDriver.session() + val tx1 = session1.beginTransaction() + tx1.run("Merge(n:Test{prop:'panda'})") + tx1.close() + session1.close() + _createAndMerge() + } + + @Test + def test5(): Unit = { + val session = pandaDriver.session() + val tx = session.beginTransaction() + tx.run("Create(n:Test)") + tx.success() + tx.close() + session.close() + + val session1 = pandaDriver.session() + val tx1 = session1.beginTransaction() + tx1.run("Match(n) Set n.prop='panda'") + tx1.success() + tx1.close() + session1.close() + + _createAndMerge() + } + + //add a test, to test different menthod to run statement. + private def _createAndMerge(): Unit = { + // Problem: the result is not available real-time. 
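+    // (presumably the write must be replicated to every gnode before the three
+    // reads below can all see it, so a short sleep may be needed here)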
+ val clusterResult = pandaDriver.session().run("Match(n) Return n") + val node0Result = neoDriver0.session().run("Match(n) Return n") + val node1Result = neoDriver1.session().run("Match(n) Return n") + Assert.assertEquals(true, clusterResult.hasNext) + Assert.assertEquals("panda", clusterResult.next().get("n").asNode().get("prop").asString()) + Assert.assertEquals(false, clusterResult.hasNext) + + Assert.assertEquals(true, node0Result.hasNext) + Assert.assertEquals("panda", node0Result.next().get("n").asNode().get("prop").asString()) + Assert.assertEquals(false, node0Result.hasNext) + + Assert.assertEquals(true, node1Result.hasNext) + Assert.assertEquals("panda", node1Result.next().get("n").asNode().get("prop").asString()) + Assert.assertEquals(false, node1Result.hasNext) + + val session = pandaDriver.session() + val tx = session.beginTransaction() + tx.run("Match(n) Delete n;") + tx.success() + tx.close() + session.close() + val clusterResult1 = pandaDriver.session().run("Match(n) Return n") + val node0Result1 = neoDriver0.session().run("Match(n) Return n") + val node1Result1 = neoDriver1.session().run("Match(n) Return n") + Assert.assertEquals(false, clusterResult1.hasNext) + Assert.assertEquals(false, node0Result1.hasNext) + Assert.assertEquals(false, node1Result1.hasNext) + } + +} diff --git a/itest/src/test/scala/distributed/StatesTest.scala b/itest/src/test/scala/distributed/StatesTest.scala new file mode 100644 index 00000000..47c6d00e --- /dev/null +++ b/itest/src/test/scala/distributed/StatesTest.scala @@ -0,0 +1,17 @@ +package distributed + +class StatesTest { +/* + test whether the flag is right in different state and the system behave as supposed. + State List: + Serving(locked/locked) <-- + PreWrite | + Write | + Finish ---- + Flag: + READY_TO_WRITE true, when there is no served request + WRITE_TO_FINISHED true, when all the gnodes finish the write operation + WAIT_JOIN (only active in serving state) true, if there is a gnode waits for joining in the cluster + + */ +} diff --git a/itest/src/test/scala/distributed/WriteTest.scala b/itest/src/test/scala/distributed/WriteTest.scala new file mode 100644 index 00000000..9b89779b --- /dev/null +++ b/itest/src/test/scala/distributed/WriteTest.scala @@ -0,0 +1,37 @@ +package distributed + +import org.junit.Test +import org.neo4j.driver.{AuthTokens, Driver, GraphDatabase, StatementResult} + +import scala.collection.mutable.ListBuffer +import scala.concurrent.Future + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 13:41 2019/11/30 + * @Modified By: + */ +class WriteTest { + val pandaDriver: Driver = GraphDatabase.driver("panda://10.0.86.26:2181/db", AuthTokens.basic("", "")) + val cypherList = List("Create(n:Test{name:'alice'});", "Create(n:Test{age:10});", "Merge(n:Test{name:'alice'});") +// + @Test + def test1(): Unit = { + val taskList: ListBuffer[Future[StatementResult]] = new ListBuffer[Future[StatementResult]] + cypherList.foreach(cypher => { + execute(pandaDriver, cypher) + }) + + } + + def execute(driver: Driver, cypher: String): StatementResult = { + val session = driver.session() + val tx = session.beginTransaction() + val statementResult = tx.run(cypher) + tx.success() + tx.close() + session.close() + statementResult + } +} diff --git a/itest/src/test/scala/external-properties/InEsPropertyTest.scala b/itest/src/test/scala/external-properties/InEsPropertyTest.scala new file mode 100644 index 00000000..e53021c4 --- /dev/null +++ b/itest/src/test/scala/external-properties/InEsPropertyTest.scala @@ -0,0 
+1,195 @@ + + +package Externalproperties + +import java.io.{File, FileInputStream} +import java.util.Properties + +import scala.collection.JavaConversions._ +import org.apache.http.HttpHost +import org.junit.{Assert, Test} +import org.elasticsearch.client.{RequestOptions, RestClient, RestHighLevelClient} +import org.elasticsearch.action.admin.indices.create.{CreateIndexRequest, CreateIndexResponse} +import org.elasticsearch.action.index.{IndexRequest, IndexResponse} +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.action.delete.{DeleteRequest, DeleteResponse} +import org.elasticsearch.action.get.{GetRequest, GetResponse} +import org.elasticsearch.action.update.{UpdateRequest, UpdateResponse} +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.common.Strings +import org.elasticsearch.index.query.{QueryBuilder, QueryBuilders} +import org.elasticsearch.search.fetch.subphase.FetchSourceContext +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.action.search.{SearchRequest, SearchResponse} +import org.elasticsearch.index.reindex.{BulkByScrollResponse, DeleteByQueryRequest} +import org.elasticsearch.script.ScriptType +import org.elasticsearch.script.mustache.SearchTemplateRequest +import org.elasticsearch.client.RequestOptions +import org.elasticsearch.script.mustache.SearchTemplateResponse +import cn.pandadb.externalprops.{InElasticSearchPropertyNodeStore, NodeWithProperties} +import org.neo4j.values.storable.Values +import com.alibaba.fastjson.JSONObject + +/** + * Created by codeBabyLin on 2019/12/5. + */ + +class InEsPropertyTest { + val host = "10.0.82.216" + val port = 9200 + val indexName = "test0113" + val typeName = "doc" + + val httpHost = new HttpHost(host, port, "http") + val builder = RestClient.builder(httpHost) + val client = new RestHighLevelClient(builder) + + @Test + def test1(): Unit = { + val esNodeStore = new InElasticSearchPropertyNodeStore(host, port, indexName, typeName) + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + var transaction = esNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "person") + transaction.addLabel(1, "people") + transaction.commit() + transaction.close() + Assert.assertEquals(1, esNodeStore.getRecorderSize) + transaction = esNodeStore.beginWriteTransaction() + var label = transaction.getNodeLabels(1) + Assert.assertEquals(2, label.size) + Assert.assertEquals("person", label.head) + Assert.assertEquals("people", label.last) + transaction.removeLabel(1, "people") + label = transaction.getNodeLabels(1) + Assert.assertEquals(1, label.size) + Assert.assertEquals("person", label.head) + transaction.commit() + transaction.close() + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + } + + // test for node property add and remove + @Test + def test2() { + val esNodeStore = new InElasticSearchPropertyNodeStore(host, port, indexName, typeName) + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + var transaction = esNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addProperty(1, "database", Values.of("pandaDB")) + + transaction.commit() + + transaction.close() + Assert.assertEquals(1, esNodeStore.getRecorderSize) + transaction = esNodeStore.beginWriteTransaction() + val name = transaction.getPropertyValue(1, "database") + Assert.assertEquals("pandaDB", name.get.asObject()) + + transaction.removeProperty(1, "database") + 
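+    // removing the last property keeps the node document itself (recorder size
+    // stays 1); only its props map should be empty after this commit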
transaction.commit() + transaction.close() + Assert.assertEquals(1, esNodeStore.getRecorderSize) + + val node = esNodeStore.getNodeById(1).head.mutable() + Assert.assertEquals(true, node.props.isEmpty) + + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + } + + //test for undo + @Test + def test3() { + val esNodeStore = new InElasticSearchPropertyNodeStore(host, port, indexName, typeName) + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + var transaction = esNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addNode(2) + transaction.addLabel(1, "person") + transaction.addProperty(2, "name", Values.of("pandaDB")) + val undo = transaction.commit() + Assert.assertEquals(2, esNodeStore.getRecorderSize) + val node1 = esNodeStore.getNodeById(1) + val node2 = esNodeStore.getNodeById(2) + Assert.assertEquals("person", node1.head.mutable().labels.head) + Assert.assertEquals("pandaDB", node2.head.mutable().props.get("name").get.asObject()) + undo.undo() + transaction.close() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + } + + // test for node property value + @Test + def test4() { + val esNodeStore = new InElasticSearchPropertyNodeStore(host, port, indexName, typeName) + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + var transaction = esNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "Person") + transaction.addLabel(1, "Man") + transaction.addProperty(1, "name", Values.of("test")) + transaction.addProperty(1, "arr1", Values.of(100)) + transaction.addProperty(1, "arr2", Values.of(155.33)) + transaction.addProperty(1, "arr3", Values.of(true)) + transaction.addProperty(1, "arr4", Values.of(Array(1, 2, 3))) + Assert.assertEquals(0, esNodeStore.getRecorderSize) + transaction.commit() + transaction.close() + Assert.assertEquals(1, esNodeStore.getRecorderSize) + val node: NodeWithProperties = esNodeStore.getNodeById(1).get + Assert.assertEquals(1, node.id) + val nodeLabels = node.labels.toArray + Assert.assertEquals(2, nodeLabels.size) + Assert.assertEquals(true, nodeLabels.contains("Man") && nodeLabels.contains("Person")) + assert(node.props.get("name").get.equals("test")) + assert(node.props.get("arr1").get.equals(100)) + assert(node.props.get("arr2").get.equals(155.33)) + assert(node.props.get("arr3").get.equals(true)) + assert(node.props.get("arr4").get.equals(Array(1, 2, 3))) + } + + // test for serach + @Test + def test5() { + val esNodeStore = new InElasticSearchPropertyNodeStore(host, port, indexName, typeName) + esNodeStore.clearAll() + Assert.assertEquals(0, esNodeStore.getRecorderSize) + var transaction = esNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "Person") + transaction.addLabel(1, "Man") + transaction.addProperty(1, "name", Values.of("test")) + transaction.addProperty(1, "arr1", Values.of(100)) + transaction.addProperty(1, "arr2", Values.of(155.33)) + transaction.addProperty(1, "arr3", Values.of(true)) + transaction.addProperty(1, "arr4", Values.of(Array(1, 2, 3))) + Assert.assertEquals(0, esNodeStore.getRecorderSize) + transaction.commit() + transaction.close() + + Assert.assertEquals(1, esNodeStore.getRecorderSize) + + val nodeIds = esNodeStore.filterNodes(QueryBuilders.termQuery("name", "test")) + assert(nodeIds.size == 1) + nodeIds.foreach(id => assert(id == 1)) + + val nodesWithProps = esNodeStore.filterNodesWithProperties(QueryBuilders.termQuery("name", "test")) + + val node: 
NodeWithProperties = nodesWithProps.toList(0) + Assert.assertEquals(1, node.id) + val nodeLabels = node.labels.toArray + Assert.assertEquals(2, nodeLabels.size) + Assert.assertEquals(true, nodeLabels.contains("Man") && nodeLabels.contains("Person")) + assert(node.props.get("name").get.equals("test")) + assert(node.props.get("arr1").get.equals(100)) + assert(node.props.get("arr2").get.equals(155.33)) + assert(node.props.get("arr3").get.equals(true)) + assert(node.props.get("arr4").get.equals(Array(1, 2, 3))) + } +} diff --git a/itest/src/test/scala/external-properties/InMemPropertyTest.scala b/itest/src/test/scala/external-properties/InMemPropertyTest.scala new file mode 100644 index 00000000..300b48c9 --- /dev/null +++ b/itest/src/test/scala/external-properties/InMemPropertyTest.scala @@ -0,0 +1,111 @@ +package Externalproperties + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops.InMemoryPropertyNodeStore +import org.junit.{Assert, Test} +import org.neo4j.driver.{AuthTokens, GraphDatabase, Transaction, TransactionWork} +import org.neo4j.values.AnyValues +import org.neo4j.values.storable.Values + +class InMemPropertyTest { + + //test node add 、delete,and property add and remove + @Test + def test1() { + InMemoryPropertyNodeStore.nodes.clear() + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) + var transaction = InMemoryPropertyNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.commit() + Assert.assertEquals(1, InMemoryPropertyNodeStore.nodes.size) + transaction = InMemoryPropertyNodeStore.beginWriteTransaction() + transaction.addProperty(1, "name", Values.of("bluejoe")) + var name = transaction.getPropertyValue(1, "name") + Assert.assertEquals("bluejoe", name.get.asObject()) + + transaction.removeProperty(1, "name") + name = transaction.getPropertyValue(1, "name") + Assert.assertEquals(None, name) + + transaction.deleteNode(1) + transaction.commit() + transaction.close() + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) + + } + + //test label add and removed + @Test + def test2() { + InMemoryPropertyNodeStore.nodes.clear() + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) + val transaction = InMemoryPropertyNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "person") + var label = transaction.getNodeLabels(1) + + Assert.assertEquals("person", label.head) + + transaction.removeLabel(1, "person") + label = transaction.getNodeLabels(1) + Assert.assertEquals(true, label.isEmpty) + transaction.deleteNode(1) + transaction.commit() + transaction.close() + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) + + } + + @Test + def test3() { + InMemoryPropertyNodeStore.nodes.clear() + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) + val transaction = InMemoryPropertyNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "person") + var label = transaction.getNodeLabels(1) + + Assert.assertEquals("person", label.head) + + val redo = transaction.commit() + + Assert.assertEquals(1, InMemoryPropertyNodeStore.nodes.size) + Assert.assertEquals("person", InMemoryPropertyNodeStore.nodes.get(1).get.mutable().labels.head) + + redo.undo() + transaction.commit() + redo.undo() + transaction.close() + + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) + + } + + //test label add + @Test + def test4() { + InMemoryPropertyNodeStore.nodes.clear() + Assert.assertEquals(0, InMemoryPropertyNodeStore.nodes.size) 
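+    // start from an empty in-memory store, then attach two labels to the same node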
+ val transaction = InMemoryPropertyNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "person") + var label = transaction.getNodeLabels(1) + + Assert.assertEquals("person", label.head) + + transaction.addLabel(1, "Man") + label = transaction.getNodeLabels(1) + assert(label.size == 2 && label.contains("Man") && label.contains("person")) + + transaction.commit() + transaction.close() + + Assert.assertEquals(1, InMemoryPropertyNodeStore.nodes.size) + val node = InMemoryPropertyNodeStore.getNodeById(1) + val labels = node.get.mutable().labels + assert(labels.size == 2 && labels.contains("person") && labels.contains("Man")) + } + +} diff --git a/itest/src/test/scala/external-properties/InSolrArrayTest.scala b/itest/src/test/scala/external-properties/InSolrArrayTest.scala new file mode 100644 index 00000000..ee68bf3f --- /dev/null +++ b/itest/src/test/scala/external-properties/InSolrArrayTest.scala @@ -0,0 +1,121 @@ + + +package Externalproperties + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops._ +import cn.pandadb.server.PNodeServer +import cn.pandadb.util.GlobalContext +import org.junit.{After, Assert, Before, Test} +import org.neo4j.driver.{AuthTokens, GraphDatabase, Transaction, TransactionWork} +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.factory.GraphDatabaseFactory +import org.neo4j.io.fs.FileUtils +import org.neo4j.values.{AnyValue, AnyValues} +import org.neo4j.values.storable.{BooleanArray, LongArray, StringArray, Values} + +/** + * Created by codeBabyLin on 2019/12/5. + */ +trait QueryTestBase { + var db: GraphDatabaseService = null + val nodeStore = "InSolrPropertyNodeStore" + + @Before + def initdb(): Unit = { + PNodeServer.toString + new File("./output/testdb").mkdirs(); + FileUtils.deleteRecursively(new File("./output/testdb")); + db = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(new File("./output/testdb")). 
+ newGraphDatabase() + nodeStore match { + case "InMemoryPropertyNodeStore" => + InMemoryPropertyNodeStore.nodes.clear() + ExternalPropertiesContext.bindCustomPropertyNodeStore(InMemoryPropertyNodeStore) + + case "InSolrPropertyNodeStore" => + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + ExternalPropertiesContext.bindCustomPropertyNodeStore(solrNodeStore) + GlobalContext.setLeaderNode(true) + } + } + + @After + def shutdowndb(): Unit = { + db.shutdown() + } + + protected def testQuery[T](query: String): Unit = { + val tx = db.beginTx(); + val rs = db.execute(query); + while (rs.hasNext) { + val row = rs.next(); + } + tx.success(); + tx.close() + } +} +trait CreateQueryTestBase extends QueryTestBase { + +} + +class InSolrArrayTest extends CreateQueryTestBase { + + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + + //val collectionName = "test" + + //test for node label add and remove + @Test + def test1() { + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + + // create node with Array type property value + val query = + """CREATE (n1:Person { name:'test01',titles:["ceo","ui","dev"], + |salaries:[10000,20000,30597,500954], boolattr:[False,True,false,true]}) + |RETURN id(n1) + """.stripMargin + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next () + id1 = row.get("id(n1)").toString.toLong + } + Assert.assertEquals(1, solrNodeStore.getRecorderSize) + val res = solrNodeStore.getNodeById(0) + val titles = Array("ceo", "ui", "dev") + val salaries = Array(10000, 20000, 30597, 500954) + val boolattr = Array(false, true, false, true) + + //scalastyle:off println + + println(res) + // println(res.get.props.get("titles").get.asObject().getClass) + // println(res.get.props.get("salaries").get.asObject().getClass) + // println(res.get.props.get("boolattr").get.asObject().getClass) + // Assert.assertEquals(3, res.get.props.get("titles").get.asInstanceOf[StringArray].length()) + Assert.assertEquals(4, res.get.props.get("salaries").get.asInstanceOf[LongArray].length()) + Assert.assertEquals(4, res.get.props.get("boolattr").get.asInstanceOf[BooleanArray].length()) + //Assert.assertEquals(boolattr, res.get.props.get("boolattr").get.asObject()) + //assert(res.get.props.get("titles").equals(titles)) + //assert(res.get.props.get("salaries").equals(salaries)) + //assert(res.get.props.get("boolattr").equals(boolattr)) + + } + +} diff --git a/itest/src/test/scala/external-properties/InSolrPropertyTest.scala b/itest/src/test/scala/external-properties/InSolrPropertyTest.scala new file mode 100644 index 00000000..d84b5961 --- /dev/null +++ b/itest/src/test/scala/external-properties/InSolrPropertyTest.scala @@ -0,0 +1,109 @@ + + +package Externalproperties + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops.{InMemoryPropertyNodeStore, 
InSolrPropertyNodeStore, MutableNodeWithProperties, NodeWithProperties} +import org.junit.{Assert, Test} +import org.neo4j.driver.{AuthTokens, GraphDatabase, Transaction, TransactionWork} +import org.neo4j.values.{AnyValue, AnyValues} +import org.neo4j.values.storable.Values + +/** + * Created by codeBabyLin on 2019/12/5. + */ + +class InSolrPropertyTest { + + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + + //val collectionName = "test" + +//test for node label add and remove + @Test + def test1() { + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + var transaction = solrNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addLabel(1, "person") + transaction.addLabel(1, "people") + transaction.commit() + transaction.close() + Assert.assertEquals(1, solrNodeStore.getRecorderSize) + transaction = solrNodeStore.beginWriteTransaction() + var label = transaction.getNodeLabels(1) + Assert.assertEquals(2, label.size) + Assert.assertEquals("person", label.head) + Assert.assertEquals("people", label.last) + transaction.removeLabel(1, "people") + label = transaction.getNodeLabels(1) + Assert.assertEquals(1, label.size) + Assert.assertEquals("person", label.head) + transaction.commit() + transaction.close() + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + } + +// test for node property add and remove + @Test + def test2() { + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + var transaction = solrNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addProperty(1, "database", Values.of("pandaDB")) + + transaction.commit() + + transaction.close() + Assert.assertEquals(1, solrNodeStore.getRecorderSize) + transaction = solrNodeStore.beginWriteTransaction() + val name = transaction.getPropertyValue(1, "database") + Assert.assertEquals("pandaDB", name.get.asObject()) + + transaction.removeProperty(1, "database") + transaction.commit() + transaction.close() + Assert.assertEquals(1, solrNodeStore.getRecorderSize) + + val node = solrNodeStore.getNodeById(1).head.mutable() + Assert.assertEquals(true, node.props.isEmpty) + + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + } + + //test for undo + @Test + def test3() { + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + var transaction = solrNodeStore.beginWriteTransaction() + transaction.addNode(1) + transaction.addNode(2) + transaction.addLabel(1, "person") + transaction.addProperty(2, "name", Values.of("pandaDB")) + val undo = transaction.commit() + Assert.assertEquals(2, solrNodeStore.getRecorderSize) + val node1 = solrNodeStore.getNodeById(1) + val node2 = solrNodeStore.getNodeById(2) + Assert.assertEquals("person", node1.head.mutable().labels.head) + Assert.assertEquals("pandaDB", node2.head.mutable().props.get("name").get.asObject()) + undo.undo() + transaction.close() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + solrNodeStore.clearAll() + Assert.assertEquals(0, 
solrNodeStore.getRecorderSize) + } +} diff --git a/itest/src/test/scala/external-properties/PredicateTest.scala b/itest/src/test/scala/external-properties/PredicateTest.scala new file mode 100644 index 00000000..1b767eaa --- /dev/null +++ b/itest/src/test/scala/external-properties/PredicateTest.scala @@ -0,0 +1,147 @@ +package Externalproperties + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops.{InMemoryPropertyNodeStore, InSolrPropertyNodeStore, MutableNodeWithProperties, NodeWithProperties} +import org.junit.{Assert, Test} +import org.neo4j.cypher.internal.runtime.interpreted.{NFLessThan, NFPredicate, _} +import org.neo4j.driver.{AuthTokens, GraphDatabase, Transaction, TransactionWork} +import org.neo4j.values.{AnyValue, AnyValues} +import org.neo4j.values.storable.Values + +import scala.collection.mutable.ArrayBuffer + +/** + * Created by codeBabyLin on 2019/12/6. + */ + +class PredicateTest { + + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + + def prepareData(solrNodeStore: InSolrPropertyNodeStore): Int = { + + val node1 = MutableNodeWithProperties(1) + node1.labels += "database" + node1.props += "name" -> Values.of("pandaDB") + node1.props += "age" -> Values.of(1) + node1.props += "nation" -> Values.of("China") + + val node2 = MutableNodeWithProperties(2) + node2.labels += "database" + node2.props += "name" -> Values.of("neo4j") + node2.props += "age" -> Values.of(5) + + val node3 = MutableNodeWithProperties(3) + node3.labels += "person" + node3.props += "name" -> Values.of("bluejoe") + node3.props += "age" -> Values.of(40) + + val node4 = MutableNodeWithProperties(4) + node4.labels += "person" + node4.props += "name" -> Values.of("jason") + node4.props += "age" -> Values.of(39) + + val node5 = MutableNodeWithProperties(5) + node5.labels += "person" + node5.props += "name" -> Values.of("Airzihao") + node5.props += "age" -> Values.of(18) + + val nodeArray = ArrayBuffer[NodeWithProperties]() + nodeArray += NodeWithProperties(node1.id, node1.props.toMap, node1.labels) + nodeArray += NodeWithProperties(node2.id, node2.props.toMap, node2.labels) + nodeArray += NodeWithProperties(node3.id, node3.props.toMap, node3.labels) + nodeArray += NodeWithProperties(node4.id, node4.props.toMap, node4.labels) + nodeArray += NodeWithProperties(node5.id, node5.props.toMap, node5.labels) + solrNodeStore.addNodes(nodeArray) + solrNodeStore.getRecorderSize + } + + @Test + def test3() { + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + + Assert.assertEquals(5, prepareData(solrNodeStore)) + + val nodeList1 = solrNodeStore.getNodesByLabel("person") + val nodeList2 = solrNodeStore.getNodesByLabel("database") + + Assert.assertEquals(3, nodeList1.size) + Assert.assertEquals(2, nodeList2.size) + + var res1 = solrNodeStore.filterNodesWithProperties(NFGreaterThan("age", Values.of(39))) + Assert.assertEquals(1, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFGreaterThanOrEqual("age", Values.of(39))) + Assert.assertEquals(2, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFLessThan("age", Values.of(18))) + Assert.assertEquals(2, res1.size) + + res1 = 
solrNodeStore.filterNodesWithProperties(NFLessThanOrEqual("age", Values.of(18))) + Assert.assertEquals(3, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFEquals("age", Values.of(18))) + Assert.assertEquals("Airzihao", res1.head.mutable().props.get("name").get.asObject()) + + res1 = solrNodeStore.filterNodesWithProperties(NFContainsWith("name", "joe")) + Assert.assertEquals(1, res1.size) + Assert.assertEquals("bluejoe", res1.head.mutable().props.get("name").get.asObject()) + + res1 = solrNodeStore.filterNodesWithProperties(NFEndsWith("name", "son")) + Assert.assertEquals(1, res1.size) + Assert.assertEquals(39.toLong, res1.head.mutable().props.get("age").get.asObject()) + + res1 = solrNodeStore.filterNodesWithProperties(NFStartsWith("name", "pan")) + Assert.assertEquals(1, res1.size) + Assert.assertEquals("database", res1.head.labels.head) + + res1 = solrNodeStore.filterNodesWithProperties(NFStartsWith("name", "pan")) + Assert.assertEquals(1, res1.size) + Assert.assertEquals("database", res1.head.labels.head) + + res1 = solrNodeStore.filterNodesWithProperties(NFFalse()) + Assert.assertEquals(0, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFTrue()) + Assert.assertEquals(5, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFHasProperty("nation")) + Assert.assertEquals(1, res1.size) + Assert.assertEquals("pandaDB", res1.head.props.get("name").get.asObject()) + + res1 = solrNodeStore.filterNodesWithProperties(NFIsNull("nation")) + Assert.assertEquals(4, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFNotNull("nation")) + Assert.assertEquals(1, res1.size) + Assert.assertEquals("China", res1.head.props.get("nation").get.asObject()) + + res1 = solrNodeStore.filterNodesWithProperties(NFNotEquals("age", Values.of(18))) + Assert.assertEquals(4, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFRegexp("name", ".?lue.*")) + Assert.assertEquals(40, res1.head.mutable().props.get("age").get.asObject().toString.toLong) + + res1 = solrNodeStore.filterNodesWithProperties(NFAnd(NFIsNull("nation"), NFLessThanOrEqual("age", Values.of(18)))) + Assert.assertEquals(2, res1.size) + + res1 = solrNodeStore.filterNodesWithProperties(NFNot(NFIsNull("nation"))) + Assert.assertEquals(1, res1.size) + Assert.assertEquals("China", res1.head.props.get("nation").get.asObject()) + + res1 = solrNodeStore.filterNodesWithProperties(NFOr(NFNotNull("nation"), NFGreaterThanOrEqual("age", Values.of(40)))) + Assert.assertEquals(2, res1.size) + + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + } + +} diff --git a/itest/src/test/scala/external-properties/SolrIterableTest.scala b/itest/src/test/scala/external-properties/SolrIterableTest.scala new file mode 100644 index 00000000..19f741c8 --- /dev/null +++ b/itest/src/test/scala/external-properties/SolrIterableTest.scala @@ -0,0 +1,87 @@ +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops.{InSolrPropertyNodeStore, MutableNodeWithProperties, NodeWithProperties, SolrQueryResults} +import org.apache.solr.client.solrj.SolrQuery +import org.junit.{Assert, Test} +import org.neo4j.values.storable.Values + +import scala.collection.mutable.ArrayBuffer + +class SolrIterableTest { + + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = 
props.getProperty("external.properties.store.solr.collection") + + //val collectionName = "test" + + //test for node label add and remove + def prepareData(solrNodeStore: InSolrPropertyNodeStore): Int = { + + val node1 = MutableNodeWithProperties(1) + node1.labels += "database" + node1.props += "name" -> Values.of("pandaDB") + node1.props += "age" -> Values.of(1) + node1.props += "nation" -> Values.of("China") + + val node2 = MutableNodeWithProperties(2) + node2.labels += "database" + node2.props += "name" -> Values.of("neo4j") + node2.props += "age" -> Values.of(5) + + val node3 = MutableNodeWithProperties(3) + node3.labels += "person" + node3.props += "name" -> Values.of("bluejoe") + node3.props += "age" -> Values.of(40) + + val node4 = MutableNodeWithProperties(4) + node4.labels += "person" + node4.props += "name" -> Values.of("jason") + node4.props += "age" -> Values.of(39) + + val node5 = MutableNodeWithProperties(5) + node5.labels += "person" + node5.props += "name" -> Values.of("Airzihao") + node5.props += "age" -> Values.of(18) + + val nodeArray = ArrayBuffer[NodeWithProperties]() + nodeArray += NodeWithProperties(node1.id, node1.props.toMap, node1.labels) + nodeArray += NodeWithProperties(node2.id, node2.props.toMap, node2.labels) + nodeArray += NodeWithProperties(node3.id, node3.props.toMap, node3.labels) + nodeArray += NodeWithProperties(node4.id, node4.props.toMap, node4.labels) + nodeArray += NodeWithProperties(node5.id, node5.props.toMap, node5.labels) + nodeArray += NodeWithProperties(node1.id + 5, node1.props.toMap, node1.labels) + nodeArray += NodeWithProperties(node2.id + 5, node2.props.toMap, node2.labels) + nodeArray += NodeWithProperties(node3.id + 5, node3.props.toMap, node3.labels) + nodeArray += NodeWithProperties(node4.id + 5, node4.props.toMap, node4.labels) + nodeArray += NodeWithProperties(node5.id + 5, node5.props.toMap, node5.labels) + nodeArray += NodeWithProperties(node1.id + 10, node1.props.toMap, node1.labels) + nodeArray += NodeWithProperties(node2.id + 10, node2.props.toMap, node2.labels) + nodeArray += NodeWithProperties(node3.id + 10, node3.props.toMap, node3.labels) + nodeArray += NodeWithProperties(node4.id + 10, node4.props.toMap, node4.labels) + nodeArray += NodeWithProperties(node5.id + 10, node5.props.toMap, node5.labels) + solrNodeStore.addNodes(nodeArray) + solrNodeStore.getRecorderSize + } + //scalastyle:off + @Test + def test1() { + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + Assert.assertEquals(0, solrNodeStore.getRecorderSize) + Assert.assertEquals(15, prepareData(solrNodeStore)) + val query = new SolrQuery("*:*") + val res = new SolrQueryResults(solrNodeStore._solrClient, query, 10) + val it = res.iterator2().toIterable + it.foreach(u => println(u)) + /* while (it.readNextPage()) { + it.getCurrentData().foreach(u => println(u)) + }*/ + // res.getAllResults().foreach(u => println(u)) + + } + +} diff --git a/itest/src/test/scala/external-properties/api-query/CreateQueryTest.scala b/itest/src/test/scala/external-properties/api-query/CreateQueryTest.scala new file mode 100644 index 00000000..00463ec8 --- /dev/null +++ b/itest/src/test/scala/external-properties/api-query/CreateQueryTest.scala @@ -0,0 +1,137 @@ + +import java.io.File +import java.time.ZoneId +import scala.collection.JavaConverters._ +import cn.pandadb.server.PNodeServer +import org.junit.{After, Assert, Before, Test} +import org.neo4j.graphdb.factory.GraphDatabaseFactory +import org.neo4j.graphdb.{GraphDatabaseService, 
Result, Label} +import org.neo4j.io.fs.FileUtils +import cn.pandadb.externalprops.{InMemoryPropertyNodeStore, InMemoryPropertyNodeStoreFactory} +import org.neo4j.values.storable.{DateTimeValue, DateValue, LocalDateTimeValue, TimeValue} + + +class CreateNodeQueryAPITest extends CreateQueryTestBase { + val tmpns = InMemoryPropertyNodeStore + + @Test + def test1(): Unit = { + // create one node + val tx1 = db.beginTx() + val label1 = Label.label("Person") + val node1 = db.createNode(label1) + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 0) + tx1.success() + tx1.close() + // after tx close, data flushed to store + val id1 = node1.getId + assert(id1 != -1 ) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 0) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + + val tx2 = db.beginTx() + val node2 = db.createNode() + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + tx2.success() + tx2.close() + + // after tx close, data flushed to store + val id2 = node2.getId + assert(id2 != -1) + assert(tmpns.nodes.size == 2) + assert(tmpns.nodes.get(id2).get.props.size == 0) + assert(tmpns.nodes.get(id2).get.labels == null || tmpns.nodes.get(id2).get.labels.size == 0) + } + + @Test + def test2(): Unit = { + // create node with labels and properties + val tx = db.beginTx() + val label1 = Label.label("Person") + val label2 = Label.label("Man") + val node1 = db.createNode(label1) + node1.setProperty("name", "test01") + node1.setProperty("age", 10) + node1.setProperty("adult", false) + val node2 = db.createNode(label1, label2) + node2.setProperty("name", "test02") + node2.setProperty("age", 20) + node2.setProperty("adult", true) + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 0) + tx.success(); + tx.close() + + var id1: Long = node1.getId + var id2: Long = node2.getId + + // after tx close, data flushed to store + assert(tmpns.nodes.get(id1).size == 1 && tmpns.nodes.get(id2).size == 1) + + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 3 && fields1("name").equals("test01") && fields1("age").equals(10) && fields1("adult").equals(false) ) + val labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 1 && labels1(0) == "Person") + + val fields2 = tmpns.nodes.get(id2).get.props + assert(fields2.size == 3 && fields2("name").equals("test02") && fields2("age").equals(20) && fields2("adult").equals(true) ) + val labels2 = tmpns.nodes.get(id2).get.labels.toList + assert(labels2.size == 2 && labels2.contains("Person") && labels2.contains("Man") ) + + } + + @Test + def test3(): Unit = { + // create node with DateTime type property and array value + val tx = db.beginTx() + val label1 = Label.label("Person") + val node1 = db.createNode(label1) + node1.setProperty("name", "test01") + node1.setProperty("age", 10) + node1.setProperty("adult", false) + + val born1 = DateValue.date(2019, 1, 1) + val born2 = TimeValue.time(12, 5, 1, 0, "Z") + val born3 = DateTimeValue.datetime(2019, 1, 2, + 12, 5, 15, 0, "Australia/Eucla") + val born4 = DateTimeValue.datetime(2015, 6, 24, + 12, 50, 35, 556, ZoneId.of("Z")) + node1.setProperty("born1", born1) + node1.setProperty("born2", born2) + node1.setProperty("born3", born3) + node1.setProperty("born4", born4) + + val arr1 = Array(1, 2, 3) + val arr2 = Array("aa", "bb", "cc") + val arr3 = Array(true, false) + node1.setProperty("arr1", arr1) + node1.setProperty("arr2", 
arr2) + node1.setProperty("arr3", arr3) + + assert(tmpns.nodes.size == 0) + tx.success(); + tx.close() + + var id1: Long = node1.getId + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).size == 1 ) + + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 10 ) + assert(fields1("born1").asInstanceOf[DateValue].equals(born1)) + assert(fields1("born2").asInstanceOf[TimeValue].equals(born2)) + assert(fields1("born3").equals(born3)) + assert(fields1("born4").equals(born4)) + assert(fields1("arr1").equals(arr1)) + assert(fields1("arr2").equals(arr2)) + assert(fields1("arr3").equals(arr3)) + + } + + +} diff --git a/itest/src/test/scala/external-properties/api-query/MatchQueryTest.scala b/itest/src/test/scala/external-properties/api-query/MatchQueryTest.scala new file mode 100644 index 00000000..b2375b7a --- /dev/null +++ b/itest/src/test/scala/external-properties/api-query/MatchQueryTest.scala @@ -0,0 +1,64 @@ + +import org.junit.Test +import org.neo4j.graphdb.Label + +import scala.collection.JavaConverters._ + +class MatchQueryAPITest extends MatchQueryTestBase { + + @Test + def test1(): Unit = { + // initData + val tx = db.beginTx() + val label1 = Label.label("Person") + val label2 = Label.label("Man") + val node1 = db.createNode(label1) + node1.setProperty("name", "test01") + node1.setProperty("age", 10) + node1.setProperty("adult", false) + val node2 = db.createNode(label1, label2) + node2.setProperty("name", "test02") + node2.setProperty("age", 20) + node2.setProperty("adult", true) + tx.success() + tx.close() + + // test getAllNodes() + val tx1 = db.beginTx() + val nodes1 = db.getAllNodes().iterator() + var count = 0 + while (nodes1.hasNext) { + count += 1 + nodes1.next() + } + tx1.close() + assert(2 == count) + + // test getLabels() + val tx2 = db.beginTx() + val n1 = db.getNodeById(node1.getId) + val labels1 = n1.getLabels().asScala + for (label: Label <- labels1) { + assert(label.name() == "Person") + } + tx2.close() + + // test getAllProperties() + val tx3 = db.beginTx() + val n2 = db.getNodeById(node1.getId) + val props2 = n2.getAllProperties() + assert(props2.get("name").equals("test01")) + assert(props2.get("age").equals(10)) + assert(props2.get("adult").equals(false)) + tx3.close() + + // test getProperty() + val tx4 = db.beginTx() + val n4 = db.getNodeById(node2.getId) + assert(n4.getProperty("name").equals("test02")) + assert(n4.getProperty("age").equals(20)) + assert(n4.getProperty("adult").equals(true)) + tx4.close() + } + +} diff --git a/itest/src/test/scala/external-properties/api-query/UpdateQueryTest.scala b/itest/src/test/scala/external-properties/api-query/UpdateQueryTest.scala new file mode 100644 index 00000000..9b37ebef --- /dev/null +++ b/itest/src/test/scala/external-properties/api-query/UpdateQueryTest.scala @@ -0,0 +1,183 @@ + +import java.io.File + +import scala.collection.JavaConverters._ +import cn.pandadb.server.PNodeServer +import org.junit.{After, Before, Test} +import org.neo4j.graphdb.factory.GraphDatabaseFactory +import org.neo4j.graphdb.{GraphDatabaseService, Label} +import org.neo4j.io.fs.FileUtils +import cn.pandadb.externalprops.{InMemoryPropertyNodeStore, InMemoryPropertyNodeStoreFactory} + + +class UpdatePropertyQueryAPITest extends UpdateQueryTestBase { + val tmpns = InMemoryPropertyNodeStore + + @Test + def test1(): Unit = { + // update and add node properties + + // create node + val tx = db.beginTx() + val label1 = Label.label("Person") + val node1 = db.createNode(label1) + // before tx close, data haven't flush to store 
+ assert(tmpns.nodes.size == 0) + tx.success() + tx.close() + // after tx close, data flushed to store + val id1 = node1.getId + assert(id1 != -1 ) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 0) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + + // add properties + + val tx2 = db.beginTx() + node1.setProperty("name", "test01") + node1.setProperty("age", 10) + node1.setProperty("sex", "male") + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 0) + tx2.success() + tx2.close() + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val fields = tmpns.nodes.get(id1).get.props + assert(fields.size == 3 && fields("name").equals("test01") && fields("age").equals(10) && + fields("sex").equals("male")) + + // update properties + val tx3 = db.beginTx() + node1.setProperty("name", "test02") + node1.setProperty("age", 20) + // before tx close, data haven't flush to store + val fields2 = tmpns.nodes.get(id1).get.props + assert(fields2.size == 3 && fields2("name").equals("test01") && fields2("age").equals(10) && + fields2("sex").equals("male")) + tx3.success() + tx3.close() + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val fields3 = tmpns.nodes.get(id1).get.props + assert(fields3.size == 3 && fields3("name").equals("test02") && fields3("age").equals(20) && + fields3("sex").equals("male")) + } + + @Test + def test2(): Unit = { + // delete node properties + + // create node + val tx1 = db.beginTx() + val label1 = Label.label("Person") + val node1 = db.createNode(label1) + node1.setProperty("name", "test01") + node1.setProperty("age", 10) + node1.setProperty("sex", "male") + val id1 = node1.getId + tx1.success() + tx1.close() + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val fields = tmpns.nodes.get(id1).get.props + assert(fields.size == 3 && fields("name").equals("test01") && fields("age").equals(10) && + fields("sex").equals("male")) + + // delete properties + val tx2 = db.beginTx() + node1.removeProperty("name") + node1.removeProperty("age") + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 3 && fields1("name").equals("test01") && fields1("age").equals(10) && + fields1("sex").equals("male")) + tx2.success() + tx2.close() + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val fields2 = tmpns.nodes.get(id1).get.props + assert(fields2.size == 1 && fields2("sex").equals("male")) + + } + +} + + +class UpdateLabelQueryAPITest extends UpdateQueryTestBase { + val tmpns = InMemoryPropertyNodeStore + + + @Test + def test1(): Unit = { + // add labels + + // create node + val tx = db.beginTx() + val label1 = Label.label("Person") + val node1 = db.createNode(label1) + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 0) + tx.success() + tx.close() + // after tx close, data flushed to store + val id1 = node1.getId + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + + // add label + val tx1 = db.beginTx() + val label2 = Label.label("Man") + node1.addLabel(label2) + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.labels.size == 1 ) + tx1.success() + 
tx1.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val labels = tmpns.nodes.get(id1).get.labels.toList + assert(labels.size == 2 && labels.contains("Person") && labels.contains("Man") ) + } + + @Test + def test2(): Unit = { + // remove one label + // create node + val tx = db.beginTx() + val label1 = Label.label("Person") + val label2 = Label.label("Boy") + val label3 = Label.label("Man") + val node1 = db.createNode(label1, label2, label3) + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 0) + tx.success() + tx.close() + // after tx close, data flushed to store + val id1 = node1.getId + assert(tmpns.nodes.size == 1) + val labels = tmpns.nodes.get(id1).get.labels.toList + assert(labels.size == 3 && labels.contains("Person") && labels.contains("Man") && labels.contains("Boy")) + + // add label + val tx1 = db.beginTx() + node1.removeLabel(label2) + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.labels.size == 3 ) + val labels2 = tmpns.nodes.get(id1).get.labels.toList + assert(labels2.size == 3 && labels2.contains("Person") && labels2.contains("Man") && labels2.contains("Boy")) + tx1.success() + tx1.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val labels3 = tmpns.nodes.get(id1).get.labels.toList + assert(labels3.size == 2 && labels3.contains("Person") && labels3.contains("Man") ) + } + +} \ No newline at end of file diff --git a/itest/src/test/scala/external-properties/cyhper-query/CreateQueryTest.scala b/itest/src/test/scala/external-properties/cyhper-query/CreateQueryTest.scala new file mode 100644 index 00000000..5be1637d --- /dev/null +++ b/itest/src/test/scala/external-properties/cyhper-query/CreateQueryTest.scala @@ -0,0 +1,273 @@ + +import java.io.File +import java.time.ZoneId + +import scala.collection.JavaConverters._ +import cn.pandadb.server.PNodeServer +import org.junit.{After, Assert, Before, Test} +import org.neo4j.graphdb.factory.GraphDatabaseFactory +import org.neo4j.graphdb.{GraphDatabaseService, Result} +import org.neo4j.io.fs.FileUtils +import cn.pandadb.externalprops.{CustomPropertyNodeStore, InMemoryPropertyNodeStore, InMemoryPropertyNodeStoreFactory} +import org.neo4j.values.storable.{DateTimeValue, DateValue, LocalDateTimeValue, TimeValue} + +trait CreateQueryTestBase extends QueryTestBase { + +} + +class CreateNodeQueryTest extends CreateQueryTestBase { + val tmpns = InMemoryPropertyNodeStore + + @Test + def test1(): Unit = { + // create one node + val query = "create (n1:Person) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + assert(id1 != -1 ) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 0) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + + val query2 = "create (n1) return id(n1)" + val rs2 = db.execute(query2) + var id2: Long = -1 + if (rs2.hasNext) { + val row = rs2.next() + id2 = row.get("id(n1)").toString.toLong + } + assert(id2 != -1) + assert(tmpns.nodes.size == 2) + assert(tmpns.nodes.get(id2).get.props.size == 0) + assert(tmpns.nodes.get(id2).get.labels.size == 0) + } + + @Test + def test2(): Unit = { + // create multiple nodes + val tx = db.beginTx() + val query = "create (n1:Person),(n2:Man) return id(n1),id(n2)" + val rs = db.execute(query) + var id1: Long = 0 + var id2: Long = 0 + 
if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + id2 = row.get("id(n2)").toString.toLong + } + assert(id1 != -1 && id2 != -1) + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 0) + tx.success() + tx.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 2) + assert(tmpns.nodes.get(id1).size == 1 && tmpns.nodes.get(id2).size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 0 ) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + assert(tmpns.nodes.get(id2).get.props.size == 0 ) + assert(tmpns.nodes.get(id2).get.labels.size == 1 && tmpns.nodes.get(id2).get.labels.toList(0) == "Man") + assert(tmpns.nodes.get(id2).get.labels.size == 1 && tmpns.nodes.get(id2).get.labels.toList(0) == "Man") + } + + @Test + def test3(): Unit = { + // create node with labels and properties + val query = + """CREATE (n1:Person { name:'test01', age:10, adult:False}) + |CREATE (n2:Person:Man { name:'test02', age:20, adult:True}) + |RETURN id(n1),id(n2) + """.stripMargin + val rs = db.execute(query) + var id1: Long = -1 + var id2: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + id2 = row.get("id(n2)").toString.toLong + } + + // Results have been visited, tx closed and data haven flush to store + assert(tmpns.nodes.get(id1).size == 1 && tmpns.nodes.get(id2).size == 1) + + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 3 && fields1("name").equals("test01") && fields1("age").equals(10) && fields1("adult").equals(false) ) + val labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 1 && labels1(0) == "Person") + + val fields2 = tmpns.nodes.get(id2).get.props + assert(fields2.size == 3 && fields2("name").equals("test02") && fields2("age").equals(20) && fields2("adult").equals(true) ) + val labels2 = tmpns.nodes.get(id2).get.labels.toList + assert(labels2.size == 2 && labels2.contains("Person") && labels2.contains("Man") ) + + } + + @Test + def test4(): Unit = { + // create node with relationship + val tx = db.beginTx() + val query = + """CREATE (n1:Person { name:'test01', age:10})-[:WorksAt]->(neo:Company{business:'Software'}) + |<-[:Create{from:1987}]-(n2:Ceo { name:'test02', age:20}) + |RETURN id(n1),id(n2), id(neo) + """.stripMargin + val rs = db.execute(query) + var id1: Long = -1 + var id2: Long = -1 + var idNeo: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + id2 = row.get("id(n2)").toString.toLong + idNeo = row.get("id(neo)").toString.toLong + } + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 0) + tx.success(); + tx.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 3) + assert(tmpns.nodes.get(id1).size == 1 && tmpns.nodes.get(id2).size == 1 && tmpns.nodes.get(idNeo).size == 1) + + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 2 && fields1("name").equals("test01") && fields1("age").equals(10) ) + val labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 1 && labels1(0) == "Person") + + val fields2 = tmpns.nodes.get(id2).get.props + assert(fields2.size == 2 && fields2("name").equals("test02") && fields2("age").equals(20) ) + val labels2 = tmpns.nodes.get(id2).get.labels.toList + assert(labels2.size == 1 && labels2.contains("Ceo") ) + + val fields3 = tmpns.nodes.get(idNeo).get.props + assert(fields3.size == 1 && 
fields3("business").equals("Software")) + val labels3 = tmpns.nodes.get(idNeo).get.labels.toList + assert(labels3.size == 1 && labels3.contains("Company")) + + } + + @Test + def test5(): Unit = { + // create node with DateTime type property value + val tx = db.beginTx() + val query = + """CREATE (n1:Person { name:'test01',born1:date('2019-01-01'), born2:time('12:05:01') + |,born3:datetime('2019-01-02T12:05:15[Australia/Eucla]'), born4:datetime('2015-06-24T12:50:35.000000556Z')}) + |RETURN id(n1) + """.stripMargin + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + assert(tmpns.nodes.size == 0) + tx.success(); + tx.close() + + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).size == 1 ) + + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 5 ) + val born1 = DateValue.date(2019, 1, 1) + val born2 = TimeValue.time(12, 5, 1, 0, "Z") + val born3 = DateTimeValue.datetime(2019, 1, 2, + 12, 5, 15, 0, "Australia/Eucla") + val born4 = DateTimeValue.datetime(2015, 6, 24, + 12, 50, 35, 556, ZoneId.of("Z")) + + assert(fields1("born1").asInstanceOf[DateValue].equals(born1)) + assert(fields1("born2").asInstanceOf[TimeValue].equals(born2)) + assert(fields1("born3").equals(born3)) + assert(fields1("born4").equals(born4)) + } + + @Test + def test6(): Unit = { + // create node with Array type property value + val query = + """CREATE (n1:Person { name:'test01',titles:["ceo","ui","dev"], + |salaries:[10000,20000,30597,500954], boolattr:[False,True,false,true]}) + |RETURN id(n1) + """.stripMargin + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next () + id1 = row.get("id(n1)").toString.toLong + } + + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).size == 1 ) + + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 4 ) + val titles = Array("ceo", "ui", "dev") + val salaries = Array(10000, 20000, 30597, 500954) + val boolattr = Array(false, true, false, true) + assert(fields1("titles").equals(titles)) + assert(fields1("salaries").equals(salaries)) + assert(fields1("boolattr").equals(boolattr)) + + } + +} + + +class CreateNodeQueryTest2 extends QueryTestBase{ + nodeStore = "" + + @Test + def test1(): Unit = { + // create node with labels and properties + val query = + """CREATE (n1:Person { name:'test01', age:10, adult:False}) + |CREATE (n2:Person:Man { name:'test02', age:20, adult:True}) + |RETURN id(n1),id(n2) + """.stripMargin + val tx1 = db.beginTx() + val rs = db.execute(query) + var id1: Long = -1 + var id2: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + id2 = row.get("id(n2)").toString.toLong + } + tx1.success() + tx1.close() + + val query2 = s"match (n1:Person) return n1.name;" + val tx2 = db.beginTx() + val rs2 = db.execute(query2) + while (rs2.hasNext) { + val n1 = rs2.next() + println(n1.get("n1.name") ) + } + tx2.close() + + val tx3 = db.beginTx() + val nodes = db.getAllNodes.iterator() + while (nodes.hasNext){ + val n1 = nodes.next() + val labels = n1.getLabels.asScala + var labelsStr= "" + for (lbl <- labels) { + labelsStr += "," + lbl.name + } + println(labelsStr) + } + tx3.close() + + } +} + diff --git a/itest/src/test/scala/external-properties/cyhper-query/MatchQueryTest.scala b/itest/src/test/scala/external-properties/cyhper-query/MatchQueryTest.scala new file mode 100644 index 00000000..32f8fd4b --- /dev/null +++ 
b/itest/src/test/scala/external-properties/cyhper-query/MatchQueryTest.scala @@ -0,0 +1,105 @@ + +import org.junit.Test +import org.neo4j.graphdb.Result + + +trait MatchQueryTestBase extends QueryTestBase { + def doCreate(queryStr: String): Unit = { + val tx = db.beginTx() + val rs = db.execute(queryStr) + tx.success() + tx.close() + } + + def initData(): Unit = { + val queryStr = + """ + |CREATE (n1:Person:Student{name: 'test01',age:15, sex:'male', school: 'No1 Middle School'}), + |(n2:Person:Teacher{name: 'test02', age: 30, sex:'male', school: 'No1 Middle School', class: 'math'}), + |(n3:Person:Teacher{name: 'test03', age: 40, sex:'female', school: 'No1 Middle School', class: 'chemistry'}) + | """.stripMargin + doCreate(queryStr) + } + + def rsRowCount(rs: Result): Int = { + var count: Int = 0; + while (rs.hasNext) { + count += 1 + println(rs.next()) + } + return count + } +} + +class MatchQueryTest extends MatchQueryTestBase { + + @Test + def test1(): Unit = { + // filter nodes by label + + initData() + // Get all nodes + val query1 = + """match (n) return n + | """.stripMargin + val rs = db.execute(query1) + assert(rsRowCount(rs) == 3) + + // filter by label + val query2 = "match (n:Person) return n" + val rs2 = db.execute(query2) + assert(rsRowCount(rs2) == 3) + + val query3 = "match (n:Teacher) return n" + val rs3 = db.execute(query3) + assert(rsRowCount(rs3) == 2) + + val query4 = "match (n:Person:Student) return n" + val rs4 = db.execute(query4) + assert(rsRowCount(rs4) == 1) + + } + + @Test + def test2(): Unit = { + // filter by property + initData() + + // filter by {} + val query1 = "match (n:Person{name: 'test01'}) return n" + val rs1 = db.execute(query1) + assert(rsRowCount(rs1) == 1) + + // filter by where + val query2 = "match (n) where n.name='test01' return n" + val rs2 = db.execute(query2) + assert(rsRowCount(rs2) == 1) + + // filter by where + val query3 = "match (n:Teacher) where n.age<35 and n.sex='male' return n" + val rs3 = db.execute(query3) + assert(rsRowCount(rs3) == 1) + } + + @Test + def test3(): Unit = { + // get property + initData() + + // filter by {} + val query1 = "match (n:Person{name: 'test01'}) return n.sex, n.age, n.class, n.school" + val rs1 = db.execute(query1) + assert(rsRowCount(rs1) == 1) + while (rs1.hasNext) { + val row = rs1.next() + val sex = row.get("n.sex") + assert("male" == sex) + val age = row.get("n.age") + assert(15 == age) + val school = row.get("n.school") + assert("No1 Middle School" == school) + } + } + + +} diff --git a/itest/src/test/scala/external-properties/cyhper-query/UpdateQueryTest.scala b/itest/src/test/scala/external-properties/cyhper-query/UpdateQueryTest.scala new file mode 100644 index 00000000..4f7b3e1d --- /dev/null +++ b/itest/src/test/scala/external-properties/cyhper-query/UpdateQueryTest.scala @@ -0,0 +1,296 @@ + +import cn.pandadb.externalprops.InMemoryPropertyNodeStore +import org.junit.Test + + +trait UpdateQueryTestBase extends QueryTestBase { + +} + +class UpdatePropertyQueryTest extends UpdateQueryTestBase { + val tmpns = InMemoryPropertyNodeStore + + @Test + def test1(): Unit = { + // update and add node properties using 'set n.prop1=value1,n.prop2=value2' + + // create node + val tx = db.beginTx() + val query = "create (n1:Person) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + tx.success() + tx.close() + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 
0) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + + // update and add properties + val tx2 = db.beginTx() + val query2 = s"match (n1:Person) where id(n1)=$id1 set n1.name='test01', n1.age=10 return n1.name,n1.age" + db.execute(query2) + tx2.success() + tx2.close() + assert(tmpns.nodes.size == 1) + val fields = tmpns.nodes.get(id1).get.props + assert(fields.size == 2 && fields("name").equals("test01") && fields("age").equals(10)) + } + + + @Test + def test2(): Unit = { + // update node properties using 'set n={prop1:value1, prop2:value2}' + + // create node + val tx = db.beginTx() + val query = "create (n1:Person{name:'test01',age:10}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + tx.success() + tx.close() + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 2 && fields1("name").equals("test01") && fields1("age").equals(10)) + + // update property + val tx2 = db.beginTx() + val query2 = s"match (n1:Person) where id(n1)=$id1 set n1={name:'test02', sex:'male'} return n1" + db.execute(query2) + tx2.success() + tx2.close() + assert(tmpns.nodes.size == 1) + val fields2 = tmpns.nodes.get(id1).get.props + assert(fields2.size == 2 && fields2("name").equals("test02") && fields2("sex").equals("male")) + } + + + @Test + def test3(): Unit = { + // update or add node properties using 'set n +={prop1:value1, prop2:value2}' + + // create node + val tx = db.beginTx() + val query = "create (n1:Person{name:'test01',age:10}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + tx.success() + tx.close() + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 2 && fields1("name").equals("test01") && fields1("age").equals(10)) + + // update property + val tx2 = db.beginTx() + val query2 = s"match (n1:Person) where id(n1)=$id1 set n1 +={name:'test02',sex:'male', work:'dev'} return n1" + db.execute(query2) + tx2.success() + tx2.close() + assert(tmpns.nodes.size == 1) + val fields2 = tmpns.nodes.get(id1).get.props + assert(fields2.size == 4 && fields2("name").equals("test02") && fields2("age").equals(10) && + fields2("sex").equals("male") && fields2("work").equals("dev")) + } + + @Test + def test4(): Unit = { + // remove node properties using 'remove n.prop1' + + // create node + val tx = db.beginTx() + val query = "create (n1:Person{name:'test01',age:10}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + tx.success() + tx.close() + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 2 && fields1("name").equals("test01") && fields1("age").equals(10)) + + // remove one property + val tx2 = db.beginTx() + val query2 = s"match (n1:Person) where id(n1)=$id1 remove n1.age" + db.execute(query2) + tx2.success() + tx2.close() + assert(tmpns.nodes.size == 1) + val fields2 = tmpns.nodes.get(id1).get.props + assert(fields2.size == 1 && fields2("name").equals("test01")) + } + + @Test + def test5(): Unit = { + // remove node all properties using 'set n={}' + + // create node + val tx = db.beginTx() + val query = "create 
(n1:Person{name:'test01',age:10}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + tx.success() + tx.close() + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + val fields1 = tmpns.nodes.get(id1).get.props + assert(fields1.size == 2 && fields1("name").equals("test01") && fields1("age").equals(10)) + + // remove property + val tx2 = db.beginTx() + val query2 = s"match (n1:Person) where id(n1)=$id1 set n1={} return n1" + db.execute(query2) + tx2.success() + tx2.close() + assert(tmpns.nodes.size == 1) + val fields2 = tmpns.nodes.get(id1).get.props + assert(fields2.size == 0) + } + +} + + +class UpdateLabelQueryTest extends UpdateQueryTestBase { + val tmpns = InMemoryPropertyNodeStore + + @Test + def test1(): Unit = { + // add labels + + // create node + val query = "create (n1:Person{name:'xx'}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 1) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + + // add labels + val tx1 = db.beginTx() + val query3 = s"match (n1:Person) where id(n1)=$id1 set n1:Man:Boy:Person return labels(n1)" + db.execute(query3) + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 1) + assert(tmpns.nodes.get(id1).get.labels.size == 1 && tmpns.nodes.get(id1).get.labels.toList(0) == "Person") + tx1.success() + tx1.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val labels = tmpns.nodes.get(id1).get.labels.toList + assert(labels.size == 3 && labels.contains("Person") && labels.contains("Man") && labels.contains("Boy")) + } + + @Test + def test2(): Unit = { + // remove one label + + // create node + val tx = db.beginTx() + val query = "create (n1:Person:Man{name:'xx'}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + tx.success() + tx.close() + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 1) + var labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 2 && labels1.contains("Person") && labels1.contains("Man")) + + // update labels + val tx2 = db.beginTx() + val query3 = s"match (n1:Person) where id(n1)=$id1 remove n1:Person return labels(n1)" + db.execute(query3) + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 1) + labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 2 && labels1.contains("Person") && labels1.contains("Man")) + tx2.success() + tx2.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val labels2 = tmpns.nodes.get(id1).get.labels.toList + assert(labels2.size == 1 && labels2.contains("Man")) + } + + + @Test + def test3(): Unit = { + // remove multi labels + + // create node + // val tx = db.beginTx() + val query = "create (n1:Person:Man:Boy{name:'xx'}) return id(n1)" + val rs = db.execute(query) + var id1: Long = -1 + if (rs.hasNext) { + val row = rs.next() + id1 = row.get("id(n1)").toString.toLong + } + // tx.success() + // tx.close() + // Results have been visited, 
tx closed and data haven flush to store + assert(id1 != -1) + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 1) + var labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 3 && labels1.contains("Person") && labels1.contains("Man") && labels1.contains("Boy")) + + // update labels + val tx2 = db.beginTx() + val query3 = s"match (n1:Person) where id(n1)=$id1 remove n1:Person:Boy return labels(n1)" + db.execute(query3) + + // before tx close, data haven't flush to store + assert(tmpns.nodes.size == 1) + assert(tmpns.nodes.get(id1).get.props.size == 1) + labels1 = tmpns.nodes.get(id1).get.labels.toList + assert(labels1.size == 3 && labels1.contains("Person") && labels1.contains("Man") && labels1.contains("Boy")) + tx2.success() + tx2.close() + + // after tx close, data flushed to store + assert(tmpns.nodes.size == 1) + val labels2 = tmpns.nodes.get(id1).get.labels.toList + assert(labels2.size == 1 && labels2.contains("Man")) + } + + +} \ No newline at end of file diff --git a/itest/src/test/scala/external-properties/neo4jANDsolrPerformanceTest.scala b/itest/src/test/scala/external-properties/neo4jANDsolrPerformanceTest.scala new file mode 100644 index 00000000..11ebd524 --- /dev/null +++ b/itest/src/test/scala/external-properties/neo4jANDsolrPerformanceTest.scala @@ -0,0 +1,247 @@ +package externals + +import java.io.{File, FileInputStream} +import java.util.Properties +import java.util.function.Consumer + +import cn.pandadb.externalprops.{InSolrPropertyNodeStore, NodeWithProperties, SolrQueryResults, SolrUtil} +import org.apache.solr.client.solrj.SolrQuery +import org.apache.solr.client.solrj.impl.CloudSolrClient +import org.junit.{Assert, Test} +import org.neo4j.cypher.internal.runtime.interpreted.{NFAnd, NFEquals, NFGreaterThan, NFLessThan, NFPredicate} +import org.neo4j.driver.{AuthTokens, GraphDatabase} +import org.neo4j.values.storable.Values +import org.scalatest.selenium.WebBrowser.Query + +import scala.collection.JavaConversions._ +import scala.collection.mutable.ArrayBuffer + +class neo4jANDsolrPerformanceTest { + + val configFile = new File("./testdata/codeBabyTest.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + val _solrClient = { + val client = new CloudSolrClient(zkString); + client.setZkClientTimeout(30000); + client.setZkConnectTimeout(50000); + client.setDefaultCollection(collectionName); + client + } + + //val size = solrNodeStore.getRecorderSize + //Assert.assertEquals(13738580, size) + val uri = "bolt://10.0.82.220:7687" + val driver = GraphDatabase.driver(uri, + AuthTokens.basic("neo4j", "bigdata")) + //val res = driver.session().run("match (n) return count(n)") + //scalastyle:off + val session = driver.session() + + def solrIteratorTime(exp: NFPredicate): Unit ={ + val time1 = System.currentTimeMillis() + val ssize = solrNodeStore.filterNodesWithProperties(exp).size + val time2 = System.currentTimeMillis() + println(s"solr Iterator time :${time2-time1},result size:$ssize") + } + + def neo4jTime(cypher: String): Unit ={ + val time1 = System.currentTimeMillis() + val nsize = session.run(cypher).list().size() + val time2 = System.currentTimeMillis() + println(s"neo4j Iterator time :${time2-time1},result size:$nsize") + } + + def solrArray(q: String): Unit 
={ + val nodeArray = ArrayBuffer[NodeWithProperties]() + val solrQuery = new SolrQuery() + solrQuery.set(q) + val time1 = System.currentTimeMillis() + val size = _solrClient.query(solrQuery).getResults.getNumFound + solrQuery.setRows(size.toInt) + + val res = _solrClient.query(solrQuery).getResults + res.foreach(u => nodeArray += SolrUtil.solrDoc2nodeWithProperties(u)) + val ssize = nodeArray.size + val time2 = System.currentTimeMillis() + println(s"solr Array time :${time2-time1},result size:$ssize") + + } + + def test(exp: NFPredicate = null, cypher: String = null, q: String = null): Unit ={ + + if(q!=null) { + solrArray(q) + } + + if (cypher!=null) { + neo4jTime(cypher) + } + + if (exp!=null) { + solrIteratorTime(exp) + } + + } + + def testFiveFilters(): Unit ={ + + val s71 = NFEquals("labels", Values.of("person")) + val s72 = NFGreaterThan("citations", Values.of(400)) + val s73 = NFGreaterThan("citations5", Values.of(80)) + val s74 = NFLessThan("citations", Values.of(450)) + val s75 = NFLessThan("citations5", Values.of(100)) + val s76 = NFEquals("nationality", Values.of("China")) + val s712 = NFAnd(s71, s72) + val s734 = NFAnd(s73, s74) + val s756 = NFAnd(s75, s76) + val s71234 = NFAnd(s712, s734) + val s7 = NFAnd(s71234, s756) + val n7 = "MATCH (n:person) where n.citations>400 and n.citations<450 and n.nationality='China' and n.citations5<100 and n.citations5>80 return n" + + val s81 = NFEquals("labels", Values.of("organization")) + val s82 = NFEquals("country", Values.of("China")) + val s83 = NFGreaterThan("citations", Values.of(200000)) + val s84 = NFGreaterThan("citations5", Values.of(140000)) + val s85 = NFLessThan("citations", Values.of(500000)) + val s812 = NFAnd(s81, s82) + val s834 = NFAnd(s83, s84) + val s81234 = NFAnd(s812, s834) + val s8 = NFAnd(s81234, s85) + val n8 = "MATCH (n:organization) where n.citations>200000 and n.citations<500000 and n.country='China' and n.citations5>140000 RETURN n" + + solrIteratorTime(s7) + neo4jTime(n7) + + solrIteratorTime(s8) + neo4jTime(n8) + + } + + def testThreeFilter(): Unit ={ + + val sq4501 = NFEquals("labels", Values.of("paper")) + val sq4502 = NFGreaterThan("citation", Values.of(350)) + val sq4503 = NFEquals("country", Values.of("India")) + val s4q5012 = NFAnd(sq4501, sq4502) + val s4q50123 = NFAnd(s4q5012, sq4503) + val n4q50 = "MATCH (n:paper) where n.citation >350 and n.country='India' return n" + + val sq5501 = NFEquals("labels", Values.of("person")) + val sq5502 = NFGreaterThan("citations", Values.of(100000)) + val sq5503 = NFEquals("nationality", Values.of("China")) + val s5q5012 = NFAnd(sq5501, sq5502) + val s5q50123 = NFAnd(s5q5012, sq5503) + val n5q50 = "MATCH (n:person) where n.citations>100000 and n.nationality='China' return n" + + + val sq6501 = NFEquals("labels", Values.of("organization")) + val sq6502 = NFGreaterThan("citations", Values.of(2000000)) + val sq6503 = NFEquals("country", Values.of("China")) + val s6q5012 = NFAnd(sq6501, sq6502) + val s6q50123 = NFAnd(s6q5012, sq6503) + val n6q50 = "MATCH (n:organization) where n.citations>2000000 and n.country='China' RETURN n" + + solrIteratorTime(s4q50123) + neo4jTime(n4q50) + solrIteratorTime(s5q50123) + neo4jTime(n5q50) + solrIteratorTime(s6q50123) + neo4jTime(n6q50) + + } + + def testLessThan50(): Unit ={ + + val sq501 = NFEquals("labels", Values.of("organization")) + val sq502 = NFGreaterThan("citations", Values.of(60000000)) + val sq5012 = NFAnd(sq501, sq502) + val s1q50 = sq5012 + val n1q50 = "match (n:organization) where n.citations>60000000 return n" + + val 
sq2501 = NFEquals("labels", Values.of("paper")) + val sq2502 = NFGreaterThan("citation", Values.of(1500)) + val s2q50 = NFAnd(sq2501, sq2502) + val n2q50 = "MATCH (n:paper) where n.citation >1500 return n" + + val sq3501 = NFEquals("labels", Values.of("person")) + val sq3502 = NFGreaterThan("citations", Values.of(400000)) + val s3q50 = NFAnd(sq3501, sq3502) + val n3q50 = "MATCH (n:person) where n.citations>400000 return n" + + + solrIteratorTime(s1q50) + neo4jTime(n1q50) + solrIteratorTime(s2q50) + neo4jTime(n2q50) + solrIteratorTime(s3q50) + neo4jTime(n3q50) + + } + + def testForeach(): Unit ={ + + val n1 = "match (n) where id(n)=8853096 return n" + val s1 = NFEquals("id", Values.of(8853096)) + val ss1 = "id:8853096" + + + val n2 = "match (n:organization) where n.citations>985 return n" + val s21 = NFGreaterThan("citations", Values.of(985)) + val s22 = NFEquals("labels", Values.of("organization")) + val s2 = NFAnd(s21 , s22) + val ss2 = "labels:organization && citations:{ 985 TO *}" + + val n3 = "match (n) where n.nationality='France' return n" + val s3 = NFEquals("nationality", Values.of("France")) + val ss3 = "nationality:France" + + val n4 = "match (n:organization) return n" + val s4 = NFEquals("labels", Values.of("organization")) + val ss4 = "labels:organization" + + val n5 = "match (n:person) where n.citations>100 and n.citations5<200 and n.nationality='Russia' return n" + val s51 = NFEquals("labels", Values.of("person")) + val s52 = NFGreaterThan("citations", Values.of(100)) + val s53 = NFLessThan("citations5", Values.of(200)) + val s54 = NFEquals("nationality", Values.of("Russia")) + val s512 = NFAnd(s51, s52) + val s534 = NFAnd(s53, s54) + val s5 = NFAnd(s512, s534) + val ss5 = "(labels:person && citations:{ 100 TO * }) && (nationality:Russia && citations5:{ * TO 200 })" + + val n6 = "match (n) where n.citations>100 and n.citations<150 return n" + val s61 = NFGreaterThan("citations", Values.of(100)) + val s62 = NFLessThan("citations", Values.of(150)) + val s6 = NFAnd(s61, s62) + val ss6 = "citations:{ 100 TO 150 }" + + test(s1, n1, ss1) + test(s2, n2, ss2) + test(s3, n3, ss3) + test(s4, n4, ss4) + test(s5, n5, ss5) + test(s6, n6, ss6) + + + + } + + @Test + def test1() { + + + // testForeach() + // testFiveFilters() + // testLessThan50() + // testThreeFilter() + + + } + +} + diff --git a/itest/src/test/scala/external-properties/ppd/InEsPredicatePushDown.scala b/itest/src/test/scala/external-properties/ppd/InEsPredicatePushDown.scala new file mode 100644 index 00000000..a59db301 --- /dev/null +++ b/itest/src/test/scala/external-properties/ppd/InEsPredicatePushDown.scala @@ -0,0 +1,30 @@ +package ppd + +import java.io.{File, FileInputStream} +import java.util.Properties + +import org.junit.{Before, Test} +import cn.pandadb.externalprops.{ExternalPropertiesContext, InElasticSearchPropertyNodeStore} + +class InEsPredicatePushDown extends QueryCase { + + @Before + def init(): Unit = { + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + + val esHost = props.getProperty("external.properties.store.es.host") + val esPort = props.getProperty("external.properties.store.es.port").toInt + val esSchema = props.getProperty("external.properties.store.es.schema") + val esIndex = props.getProperty("external.properties.store.es.index") + val esType = props.getProperty("external.properties.store.es.type") + val esScrollSize = props.getProperty("external.properties.store.es.scroll.size", "1000").toInt + val esScrollTime = 
props.getProperty("external.properties.store.es.scroll.time.minutes", "10").toInt + + val esNodeStore = new InElasticSearchPropertyNodeStore(esHost, esPort, esIndex, esType, esSchema, esScrollSize, esScrollTime) + esNodeStore.clearAll() + buildDB(esNodeStore) + } + +} \ No newline at end of file diff --git a/itest/src/test/scala/external-properties/ppd/InMemoryPredicatePushDown.scala b/itest/src/test/scala/external-properties/ppd/InMemoryPredicatePushDown.scala new file mode 100644 index 00000000..50604b51 --- /dev/null +++ b/itest/src/test/scala/external-properties/ppd/InMemoryPredicatePushDown.scala @@ -0,0 +1,13 @@ +package ppd + +import org.junit.Before +import cn.pandadb.externalprops.InMemoryPropertyNodeStore + +class InMemoryPredicatePushDown extends QueryCase { + + @Before + def init(): Unit = { + buildDB(InMemoryPropertyNodeStore) + } + +} \ No newline at end of file diff --git a/itest/src/test/scala/external-properties/ppd/InSolrPredicatePushDown.scala b/itest/src/test/scala/external-properties/ppd/InSolrPredicatePushDown.scala new file mode 100644 index 00000000..cfc505fa --- /dev/null +++ b/itest/src/test/scala/external-properties/ppd/InSolrPredicatePushDown.scala @@ -0,0 +1,23 @@ +package ppd + +import java.io.{File, FileInputStream} +import java.util.Properties + +import org.junit.Before +import cn.pandadb.externalprops.{ExternalPropertiesContext, InSolrPropertyNodeStore} + +class InSolrPredicatePushDown extends QueryCase { + + @Before + def init(): Unit = { + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + buildDB(solrNodeStore) + } + +} \ No newline at end of file diff --git a/itest/src/test/scala/external-properties/ppd/QueryCase.scala b/itest/src/test/scala/external-properties/ppd/QueryCase.scala new file mode 100644 index 00000000..8231df5f --- /dev/null +++ b/itest/src/test/scala/external-properties/ppd/QueryCase.scala @@ -0,0 +1,150 @@ +package ppd + +import java.io.File + +import cn.pandadb.externalprops.{CustomPropertyNodeStore, ExternalPropertiesContext, InMemoryPropertyNodeStore, InMemoryPropertyNodeStoreFactory} +import cn.pandadb.util.GlobalContext +import org.junit.{After, Test} +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.factory.GraphDatabaseFactory +import org.neo4j.io.fs.FileUtils + +trait QueryCase { + + var db: GraphDatabaseService = null + + def buildDB(store: CustomPropertyNodeStore): Unit = { + if (db == null) { + ExternalPropertiesContext.bindCustomPropertyNodeStore(store) + GlobalContext.setLeaderNode(true) + val dbFile: File = new File("./output/testdb") + FileUtils.deleteRecursively(dbFile); + dbFile.mkdirs(); + db = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(dbFile).newGraphDatabase() + db.execute("CREATE (n:Person {age: 10, name: 'bob', address: 'CNIC, CAS, Beijing, China'})") + db.execute("CREATE (n:Person {age: 10, name: 'bob2', address: 'CNIC, CAS, Beijing, China'})") + db.execute("CREATE (n:Person {age: 40, name: 'alex', address: 'CNIC, CAS, Beijing, China'})") + db.execute("CREATE (n:Person {age: 40, name: 'alex2', address: 'CNIC, CAS, Beijing, China'})") + db.execute("CREATE INDEX ON :Person(address)") + db.execute("CREATE INDEX ON :Person(name)") + 
db.execute("CREATE INDEX ON :Person(age)") + db.execute("CREATE INDEX ON :Person(name, age)") + db.execute("match (f:Person), (s:Person) where f.age=40 AND s.age=10 CREATE (f)-[hood:Father]->(s)") + } + } + + @After + def shutdownDB(): Unit = { + db.shutdown() + } + + def testQuery(query: String, resultKey: String): Unit = { + val rs = db.execute(query) + var resultValue: Long = -1 + if (rs.hasNext) { + resultValue = rs.next().get(resultKey).toString.toLong + } + assert(resultValue != -1) + } + + @Test + def lessThan(): Unit = { + testQuery("match (n) where 18>n.age return id(n)", "id(n)") + } + + @Test + def greaterThan(): Unit = { + testQuery("match (n) where 9(s:Person) where f.name STARTS WITH 'a' and s.name STARTS WITH 'b' return COUNT(s)", "COUNT(s)") + } + + @Test + def indexStringEndsWith(): Unit = { + testQuery("match (n:Person) USING INDEX n:Person(address, age) where n.address ENDS WITH 'China' and n.age = 10 return id(n)", "id(n)") + } + + @Test + def compositeIndexStringEndsWith(): Unit = { + testQuery("match (n:Person) where n.name = 'bob' and n.age = 10 return count(n)", "count(n)") + } + + @Test + def udf(): Unit = { + testQuery("match (n:Person) where toInteger(n.age) = 10 AND subString(n.address,0,4) = 'CNIC' return id(n)", "id(n)") + } + +// @Test +// def notEqual(): Unit = { +// testQuery("match (f:Person)-[:Father]->(s:Person) where not f.age = s.age return count(f)", "count(f)") +// } + + @Test + def hasProperty(): Unit = { + testQuery("match (n:Person) WHERE NOT EXISTS (n.age) return count(n)", "count(n)") + } + + @Test + def in(): Unit = { + testQuery("match (n:Person) WHERE n.age IN [40, 10] return count(n)", "count(n)") + } + +} diff --git a/itest/src/test/scala/external-properties/ppd/RelationCase.scala b/itest/src/test/scala/external-properties/ppd/RelationCase.scala new file mode 100644 index 00000000..892f1b6f --- /dev/null +++ b/itest/src/test/scala/external-properties/ppd/RelationCase.scala @@ -0,0 +1,47 @@ +package ppd + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops.{ExternalPropertiesContext, InSolrPropertyNodeStore} +import cn.pandadb.util.GlobalContext +import org.junit.Test +import org.neo4j.driver.{AuthTokens, GraphDatabase} + +class RelationCase { + + val configFile = new File("./testdata/codeBabyTest.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + GlobalContext.setLeaderNode(true) + + val uri = "bolt://10.0.82.220:7687" + val driver = GraphDatabase.driver(uri, AuthTokens.basic("neo4j", "bigdata")) + val session = driver.session() + val query = "match (n:person)-[:write_paper]->(p:paper) where p.country = 'United States' AND n.citations>10 return count(n)" + + def run(): Unit = { + val startTime = System.currentTimeMillis() + val result = session.run(query) + val endTime = System.currentTimeMillis() + println(result.list()) + println(s"query latency: ${endTime-startTime}") + } + + @Test + def solr() { + ExternalPropertiesContext.bindCustomPropertyNodeStore(solrNodeStore) + run() + } + + @Test + def native(): Unit = { + ExternalPropertiesContext.bindCustomPropertyNodeStore(null) + run() + } + +} diff --git a/itest/src/test/scala/external-properties/query-testbase/QueryTestBase.scala 
b/itest/src/test/scala/external-properties/query-testbase/QueryTestBase.scala new file mode 100644 index 00000000..44bbd5f3 --- /dev/null +++ b/itest/src/test/scala/external-properties/query-testbase/QueryTestBase.scala @@ -0,0 +1,57 @@ + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops._ +import cn.pandadb.server.PNodeServer +import cn.pandadb.util.GlobalContext +import org.junit.{After, Before} +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.factory.GraphDatabaseFactory +import org.neo4j.io.fs.FileUtils + +trait QueryTestBase { + var db: GraphDatabaseService = null + var nodeStore = "InMemoryPropertyNodeStore" + + @Before + def initdb(): Unit = { + PNodeServer.toString + new File("./output/testdb").mkdirs(); + FileUtils.deleteRecursively(new File("./output/testdb")); + db = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(new File("./output/testdb")). + newGraphDatabase() + nodeStore match { + case "InMemoryPropertyNodeStore" => + InMemoryPropertyNodeStore.nodes.clear() + ExternalPropertiesContext.bindCustomPropertyNodeStore(InMemoryPropertyNodeStore) + GlobalContext.setLeaderNode(true) + + case "InSolrPropertyNodeStore" => + val configFile = new File("./testdata/neo4j.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + solrNodeStore.clearAll() + ExternalPropertiesContext.bindCustomPropertyNodeStore(solrNodeStore) + case _ => + } + } + + @After + def shutdowndb(): Unit = { + db.shutdown() + } + + protected def testQuery[T](query: String): Unit = { + val tx = db.beginTx(); + val rs = db.execute(query); + while (rs.hasNext) { + val row = rs.next(); + } + tx.success(); + tx.close() + } +} diff --git a/itest/src/test/scala/external-properties/solrIteratorPerformanceTest.scala b/itest/src/test/scala/external-properties/solrIteratorPerformanceTest.scala new file mode 100644 index 00000000..48e26283 --- /dev/null +++ b/itest/src/test/scala/external-properties/solrIteratorPerformanceTest.scala @@ -0,0 +1,61 @@ +package external + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.externalprops.{InSolrPropertyNodeStore, SolrQueryResults} +import org.apache.solr.client.solrj.SolrQuery +import org.junit.{Assert, Test} + +class solrIteratorPerformanceTest { + + val configFile = new File("./testdata/codeBabyTest.conf") + val props = new Properties() + props.load(new FileInputStream(configFile)) + val zkString = props.getProperty("external.properties.store.solr.zk") + val collectionName = props.getProperty("external.properties.store.solr.collection") + + //scalastyle:off + @Test + def test1() { + println(zkString) + println(collectionName) + + val solrNodeStore = new InSolrPropertyNodeStore(zkString, collectionName) + + val size = solrNodeStore.getRecorderSize + Assert.assertEquals(13738580, size) + + println(size) + var _startTime = System.currentTimeMillis() + //val query = new SolrQuery("country:Finland") + val query = new SolrQuery("country:Finland") + val res = new SolrQueryResults(solrNodeStore._solrClient, query, 10000) + val it = res.iterator2().toIterable + // it.foreach(u => println(u.id)) + println(it.size) + //it.getCurrentData().foreach(u => println(u)) + /* println(it.getCurrentData().size) + _startTime 
= System.currentTimeMillis() + var i = 0 + while (it.readNextPage()) { + i += 1 + val _endTime = System.currentTimeMillis() + println({s"$i--Time:${_endTime - _startTime}"}) + //it.getCurrentData().foreach(u => println(u)) + println(it.getCurrentData().size) + } + val _endTime = System.currentTimeMillis() + + println({s"iterator-totalTime:${_endTime - _startTime}"}) + + // res.getAllResults() + + val _endTime1 = System.currentTimeMillis() + + println({s"getall-totalTime:${_endTime1 - _endTime}"}) +*/ + + } + +} diff --git a/itest/src/test/scala/perfomance/PerformanceTests.scala b/itest/src/test/scala/perfomance/PerformanceTests.scala new file mode 100644 index 00000000..67424c1c --- /dev/null +++ b/itest/src/test/scala/perfomance/PerformanceTests.scala @@ -0,0 +1,158 @@ +package perfomance + +import java.io.{File, FileInputStream, FileWriter} +import java.text.SimpleDateFormat +import java.util.{Date, Properties} + +import cn.pandadb.externalprops.{ExternalPropertiesContext, InElasticSearchPropertyNodeStore} +import cn.pandadb.util.GlobalContext +import org.neo4j.graphdb.GraphDatabaseService +import org.neo4j.graphdb.factory.GraphDatabaseFactory + +import scala.io.Source + + +trait TestBase { + + def nowDate: String = { + val now = new Date + val dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") + dateFormat.format(now) + } + +} + + +object Neo4jTests extends TestBase { + + var esNodeStores: Option[InElasticSearchPropertyNodeStore] = None + + def createPandaDB(props: Properties): GraphDatabaseService = { + var graphPath = "" + if (props.containsKey("graph.data.path")) graphPath = props.get("graph.data.path").toString + else throw new Exception("Configure File Error: graph.data.path is not exist! ") + val graphFile = new File(graphPath) + if (!graphFile.exists) throw new Exception(String.format("Error: GraphPath(%s) is not exist! ", graphPath)) + + val esHost = props.getProperty("external.properties.store.es.host") + val esPort = props.getProperty("external.properties.store.es.port").toInt + val esSchema = props.getProperty("external.properties.store.es.schema") + val esIndex = props.getProperty("external.properties.store.es.index") + val esType = props.getProperty("external.properties.store.es.type") + val esScrollSize = props.getProperty("external.properties.store.es.scroll.size", "1000").toInt + val esScrollTime = props.getProperty("external.properties.store.es.scroll.time.minutes", "10").toInt + val esNodeStore = new InElasticSearchPropertyNodeStore(esHost, esPort, esIndex, esType, esSchema, esScrollSize, esScrollTime) + ExternalPropertiesContext.bindCustomPropertyNodeStore(esNodeStore) + GlobalContext.setLeaderNode(true) + esNodeStores = Some(esNodeStore) + + new GraphDatabaseFactory().newEmbeddedDatabase(graphFile) + } + + def createNeo4jDB(props: Properties): GraphDatabaseService = { + var graphPath = "" + if (props.containsKey("graph.data.path")) graphPath = props.get("graph.data.path").toString + else throw new Exception("Configure File Error: graph.data.path is not exist! ") + val graphFile = new File(graphPath) + if (!graphFile.exists) throw new Exception(String.format("Error: GraphPath(%s) is not exist! 
", graphPath)) + + new GraphDatabaseFactory().newEmbeddedDatabase(graphFile) + } + + def main(args: Array[String]): Unit = { + + var propFilePath = "/home/bigdata/pandadb-2019/itest/testdata/performance-test.conf" // null; + if (args.length > 0) propFilePath = args(0) + val props = new Properties + props.load(new FileInputStream(new File(propFilePath))) + + var testDb = "neo4j" + var graphPath = "" + var logFileDir = "" + var cyhperFilePath = "" + + if (props.containsKey("test.db")) testDb = props.get("test.db").toString.toLowerCase() + else throw new Exception("Configure File Error: test.db is not exist! ") + + if (props.containsKey("graph.data.path")) graphPath = props.get("graph.data.path").toString + else throw new Exception("Configure File Error: graph.data.path is not exist! ") + + if (props.containsKey("log.file.dir")) logFileDir = props.get("log.file.dir").toString + else throw new Exception("Configure File Error: log.file.dir is not exist! ") + + if (props.containsKey("test.cyhper.path")) cyhperFilePath = props.get("test.cyhper.path").toString + else throw new Exception("Configure File Error: test.cyhper.path is not exist! ") + + val logDir: File = new File(logFileDir) + if (!logDir.exists()) { + logDir.mkdirs + println("make log dir") + } + val logFileName = new SimpleDateFormat("MMdd-HHmmss").format(new Date) + ".log" + val logFile = new File(logDir, logFileName) + + println("Neo4j Test") + println(s"GraphDataPath: ${graphPath} \n LogFilePath: ${logFile.getAbsolutePath}") + + val logFw = new FileWriter(logFile) + logFw.write(s"GraphDataPath: $graphPath \n") + val cyhpers = readCyphers(cyhperFilePath) + var db: GraphDatabaseService = null + + if (testDb.equals("neo4j")) { + println(s"testDB: neo4j \n") + logFw.write(s"testDB: neo4j \n") + db = createNeo4jDB(props) + } + else if (testDb.equals("pandadb")) { + println(s"testDB: pandadb \n") + logFw.write(s"testDB: pandadb \n") + db = createPandaDB(props) + } + + if (db == null) { + throw new Exception("DB is null") + } + + println("==== begin tests ====") + val beginTime = nowDate + println(beginTime) + + try { + var i = 0 + cyhpers.foreach(cyhper => { + i += 1 + val tx = db.beginTx() + val mills0 = System.currentTimeMillis() + val res = db.execute(cyhper) + val useMills = System.currentTimeMillis() - mills0 + tx.close() + println(s"$i, $useMills") + logFw.write(s"\n====\n$cyhper\n") + logFw.write(s"UsedTime(ms): $useMills \n") + logFw.flush() + }) + } + finally { + logFw.close() + if (testDb == "pandadb" && esNodeStores.isDefined) { + esNodeStores.get.esClient.close() + } + db.shutdown() + } + + println("==== end tests ====") + val endTime = nowDate + println("Begin Time: " + beginTime) + println("End Time: " + endTime) + + } + + def readCyphers(filePath: String): Iterable[String] = { + val source = Source.fromFile(filePath, "UTF-8") + val lines = source.getLines().toArray + source.close() + lines + } + +} diff --git a/itest/src/test/scala/release/EnvironmentTest.scala b/itest/src/test/scala/release/EnvironmentTest.scala new file mode 100644 index 00000000..151ee265 --- /dev/null +++ b/itest/src/test/scala/release/EnvironmentTest.scala @@ -0,0 +1,73 @@ +package release + +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.network.{NodeAddress, ZKPathConfig} +import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import org.apache.curator.retry.ExponentialBackoffRetry +import org.junit.{Assert, Test} +import EnvironmentTest.{clusterNodes, curator} +import 
org.neo4j.driver.{AuthTokens, GraphDatabase} + +/** + * @Author: Airzihao + * @Description: This is a comprehensive test for the environment. + * @Date: Created at 10:24 2019/12/20 + * @Modified By: + */ + +object EnvironmentTest { + + val dir = new File("./itest/comprehensive") + if (!dir.exists()) { + dir.mkdirs() + } + val props: Properties = { + val props = new Properties() + props.load(new FileInputStream(new File(s"${dir}/envirTest.properties"))) + props + } + val zkString = props.getProperty("zkServerAddr") + val clusterNodes: Array[NodeAddress] = { + props.getProperty("clusterNodes").split(",").map(str => NodeAddress.fromString(str)) + } + val curator: CuratorFramework = CuratorFrameworkFactory.newClient(zkString, + new ExponentialBackoffRetry(1000, 3)); + curator.start() +} + +// shall this class be established on the driver side? +class EnvironmentTest { + + // test zk environment, make sure the cluster has the access to R/W the ZK cluster + @Test + def test1(): Unit = { + if (curator.checkExists().forPath(ZKPathConfig.registryPath) == null) { + curator.create().forPath(ZKPathConfig.registryPath) + } + curator.delete().deletingChildrenIfNeeded().forPath(ZKPathConfig.registryPath) + Assert.assertEquals(null, curator.checkExists().forPath(ZKPathConfig.registryPath)) + } + + // make sure the driver can access to each node. + @Test + def test2(): Unit = { + clusterNodes.foreach(nodeAddress => { + val boltURI = s"bolt://${nodeAddress.getAsString}" + val driver = GraphDatabase.driver(boltURI, AuthTokens.basic("", "")) + val session = driver.session() + val tx = session.beginTransaction() + tx.success() + tx.close() + session.close() + driver.close() + }) + } + + // how to make sure the node has access to each other? + @Test + def test3(): Unit = { + + } +} diff --git a/itest/src/test/scala/release/PerformanceTest.scala b/itest/src/test/scala/release/PerformanceTest.scala new file mode 100644 index 00000000..179d8942 --- /dev/null +++ b/itest/src/test/scala/release/PerformanceTest.scala @@ -0,0 +1,254 @@ +package release + +import java.io.{File, FileInputStream, PrintWriter} +import java.util.Properties +import java.util.concurrent.TimeoutException + +import cn.pandadb.driver.{EASY_ROUND, RANDOM_PICK, ROBIN_ROUND, SelectNode} +import com.google.gson.GsonBuilder +import org.junit.{Assert, Test} +import org.neo4j.driver.{AuthTokens, Driver, GraphDatabase, StatementResult} + +import scala.collection.mutable.ListBuffer +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.duration._ +import scala.concurrent.{Await, Future} +import scala.io.Source + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 11:40 2019/12/11 + * @Modified By: + */ +abstract class PerformanceTest { + + val dir = new File("./itest/performance") + if (!dir.exists()) { + dir.mkdirs() + } + + val outputDir = new File(s"${dir}/output") + if (!outputDir.exists()) { + outputDir.mkdirs() + } + + val gson = new GsonBuilder().enableComplexMapKeySerialization().create() + + val props: Properties = { + val props = new Properties() + props.load(new FileInputStream(new File(s"${dir}/performanceConf.properties"))) + props + } + + def getRecordFile(fileName: String): File = { + val recordFile = new File(s"${outputDir}/${fileName}") + if(!recordFile.exists()) { + recordFile.createNewFile() + } + recordFile + } + + def getStatementsIter(fileName: String): Iterator[String] = { + val statementFile = new File(s"${dir}/${fileName}") + val source = Source.fromFile(statementFile, "utf-8") + val 
lineIterator = source.getLines() + lineIterator + } + + def executeCypher[T <: Driver](cypher: String, driver: T): (Array[Long], StatementResult) = { + val _time0 = System.currentTimeMillis() + + val session = driver.session() + val _time1 = System.currentTimeMillis() + + val tx = session.beginTransaction() + val _time2 = System.currentTimeMillis() + + val ans = tx.run(cypher) + val _time3 = System.currentTimeMillis() + + tx.success() + tx.close() + val _time4 = System.currentTimeMillis() + + session.close() + val _time5 = System.currentTimeMillis() + (Array(_time0, _time1, _time2, _time3, _time4, _time5), ans) + } + + def fullTest(recordFile: File, recorder: PrintWriter, cmdIter: Iterator[String], driverArgs: Array[String]): Map[String, StatementResult] = { + var ansMap: Map[String, StatementResult] = Map() + + val cmdArray = cmdIter.toArray + val _startTime = System.currentTimeMillis() + val resultLog = new ListBuffer[Future[ResultMap]] + + cmdArray.foreach(cypher => { + val logItem = Future[ResultMap] { + val driver = GraphDatabase.driver(driverArgs(0), AuthTokens.basic(driverArgs(1), driverArgs(2))) + val result = executeCypher(cypher, driver) + val resultMap = new ResultMap(cypher, result._1) + ansMap += (cypher -> result._2) + resultMap + } + resultLog.append(logItem) + }) + + var _i = 0 + var _successed = 0 + var _failed = 0 + val sum = resultLog.length + resultLog.foreach(logItem => { + val resultMap: ResultMap = try { + _i = _i + 1 + // scalastyle:off + println(s"Waiting for the ${_i}th of ${sum} result, ${_successed} successed, ${_failed} timeout.") + val resultMap = Await.result(logItem, 300.seconds) + _successed += 1 + resultMap + } catch { + case timeout: TimeoutException => + _failed += 1 + val _timeOutArray = Array(-1.toLong, -1.toLong, -1.toLong, -1.toLong, -1.toLong, -1.toLong) + val cypher = cmdArray(_i-1) + new ResultMap(cypher, _timeOutArray) + } + val line = gson.toJson(resultMap.getResultMap) + "\n" + recorder.write(line) + recorder.flush() + }) + val _endTime = System.currentTimeMillis() + recorder.write({s"totalTime:${_endTime - _startTime}"}) + recorder.flush() + ansMap + } + + def fullTest(recordFile: File, recorder: PrintWriter, cmdIter: Iterator[String], driver: Driver): Map[String, StatementResult] = { + var ansMap: Map[String, StatementResult] = Map() + + val cmdArray = cmdIter.toArray + val _startTime = System.currentTimeMillis() + val resultLog = new ListBuffer[Future[ResultMap]] + + cmdArray.foreach(cypher => { + val logItem = Future[ResultMap] { + val result = executeCypher(cypher, driver) + val resultMap = new ResultMap(cypher, result._1) + ansMap += (cypher -> result._2) + resultMap + } + resultLog.append(logItem) + }) + + var _i = 0 + var _successed = 0 + var _failed = 0 + val sum = resultLog.length + resultLog.foreach(logItem => { + val resultMap: ResultMap = try { + _i = _i + 1 + // scalastyle:off + println(s"Waiting for the ${_i}th of ${sum} result, ${_successed} successed, ${_failed} timeout.") + val resultMap = Await.result(logItem, 300.seconds) + _successed += 1 + resultMap + } catch { + case timeout: TimeoutException => + _failed += 1 + val _timeOutArray = Array(-1.toLong, -1.toLong, -1.toLong, -1.toLong, -1.toLong, -1.toLong) + val cypher = cmdArray(_i-1) + new ResultMap(cypher, _timeOutArray) + } + val line = gson.toJson(resultMap.getResultMap) + "\n" + recorder.write(line) + recorder.flush() + }) + val _endTime = System.currentTimeMillis() + recorder.write({s"totalTime:${_endTime - _startTime}"}) + recorder.flush() + ansMap + } + +} + +class 
Neo4jPerformanceTest extends PerformanceTest { + + val recordFile = getRecordFile(props.getProperty("neo4jResultFile")) + val recorder = new PrintWriter(recordFile) + val cmdIter = getStatementsIter(props.getProperty("statementsFile")) + + val driver = GraphDatabase.driver(props.getProperty("boltURI"), + AuthTokens.basic("neo4j", "bigdata")) + + @Test + def test1(): Unit = { + fullTest(recordFile, recorder, cmdIter, driver) + } + +} + +class PandaDBPerformanceTest extends PerformanceTest { + val recordFile = getRecordFile(props.getProperty("PandaDBResultFile")) + val recorder = new PrintWriter(recordFile) + val cmdIter = getStatementsIter(props.getProperty("statementsFile")) +// SelectNode.setPolicy(ne) + SelectNode.setPolicy(new EASY_ROUND) + val pandaDriver = GraphDatabase.driver(s"panda://${props.getProperty("zkServerAddr")}/db", + AuthTokens.basic("", "")) + + @Test + def test1(): Unit = { + fullTest(recordFile, recorder, cmdIter, pandaDriver) + } +} + +class MergePerformanceTest extends PandaDBPerformanceTest { + + // just run test0, you will get both neo4j and panda test result + @Test + def test0(): Unit = { + val list = List(2) + list.foreach(i => circularTest(i)) + } + + def circularTest(time: Int): Unit = { + val pandaResult = pandaTest(time) + Thread.sleep(5000) + val neo4jResult = neo4jTest(time) +// Await.result(pandaResult, Duration.Inf) +// Await.result(neo4jResult, Duration.Inf) + neo4jResult.foreach( r => { + val n = r._2 + println(r._1) + if(pandaResult.contains(r._1)) { + val p = pandaResult.get(r._1).get + Assert.assertEquals(n.hasNext, p.hasNext) + while (n.hasNext) { + Assert.assertEquals(n.next(), p.next()) + } + Assert.assertEquals(n.hasNext, p.hasNext) + } + }) + } + + def pandaTest(time: Int): Map[String, StatementResult] = { + val pRecordFile = getRecordFile(s"hugepanda${time}.txt") + val pRecorder = new PrintWriter(pRecordFile) + val pCmdIter = getStatementsIter(props.getProperty("statementsFile")) + SelectNode.setPolicy(new EASY_ROUND) + val pandaDriver = GraphDatabase.driver(s"panda://${props.getProperty("zkServerAddr")}/db", + AuthTokens.basic("", "")) +// fullTest(pRecordFile, pRecorder, pCmdIter, Array(s"panda://${props.getProperty("zkServerAddr")}/db", "", "")) + fullTest(pRecordFile, pRecorder, pCmdIter, pandaDriver) + } + + def neo4jTest(time: Int): Map[String, StatementResult] = { + val nRecordFile = getRecordFile(s"hugeneo4j${time}.txt") + val nRecorder = new PrintWriter(nRecordFile) + val nCmdIter = getStatementsIter(props.getProperty("statementsFile")) +// val neo4jDriver = GraphDatabase.driver(props.getProperty("boltURI"), +// AuthTokens.basic("neo4j", "bigdata")) + fullTest(nRecordFile, nRecorder, nCmdIter, Array(props.getProperty("boltURI"), "neo4j", "bigdata")) + } +} \ No newline at end of file diff --git a/itest/src/test/scala/release/ResultMap.scala b/itest/src/test/scala/release/ResultMap.scala new file mode 100644 index 00000000..f5439371 --- /dev/null +++ b/itest/src/test/scala/release/ResultMap.scala @@ -0,0 +1,63 @@ +package release + +import scala.collection.JavaConverters._ +import scala.collection.mutable + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 16:33 2019/12/11 + * @Modified By: + */ + +class ResultMap(cypher: String, timeList: Array[Long]) { + + private val _resultMap = mutable.Map[String, Any]() + + def put[T](key: String, value: T): T = { + _resultMap(key) = value + value + }; + + def getResultMap: java.util.Map[String, Any] = { + this._putCypher + if (timeList.sum == -6) { + _resultMap += 
("sessionCreation" -> -1) + _resultMap += ("txCreation" -> -1) + _resultMap += ("executionTime" -> -1) + _resultMap += ("txClose" -> -1) + _resultMap += ("sessionClose" -> -1) + _resultMap += ("totalRespTime" -> -1) + } else { + this._putSessionCreationTime + this._putTxCreationTime + this._putExecutionTime + this._putTxCloseTime + this._putSessionCloseTime + this._putRespTime + } + _resultMap.toMap.asJava + } + + private def _putCypher: Unit = { + this.put("cypher", cypher) + } + private def _putSessionCreationTime: Unit = { + this.put("sessionCreation", (timeList(1) - timeList(0)).toInt) + } + private def _putTxCreationTime: Unit = { + this.put("txCreation", (timeList(2) - timeList(1)).toInt) + } + private def _putExecutionTime: Unit = { + this.put("executionTime", (timeList(3) - timeList(2)).toInt) + } + private def _putTxCloseTime: Unit = { + this.put("txClose", (timeList(4) - timeList(3)).toInt) + } + private def _putSessionCloseTime: Unit = { + this.put("sessionClose", (timeList(5) - timeList(4)).toInt) + } + private def _putRespTime: Unit = { + this.put("totalRespTime", (timeList(5) - timeList(0)).toInt) + } +} diff --git a/testdata/car1.jpg b/itest/testdata/car1.jpg similarity index 100% rename from testdata/car1.jpg rename to itest/testdata/car1.jpg diff --git a/itest/testdata/codeBabyTest.conf b/itest/testdata/codeBabyTest.conf new file mode 100644 index 00000000..b63eba37 --- /dev/null +++ b/itest/testdata/codeBabyTest.conf @@ -0,0 +1,40 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7687 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=localhost:7474 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ +external.properties.store.factory=cn.pandadb.externalprops.InSolrPropertyNodeStoreFactory +#external.properties.store.solr.zk=10.0.86.179:2181,10.0.87.45:2181,10.0.87.46:2181 +#external.properties.store.solr.collection=graiphdb + + +#external.property.storage.enabled=true +#external.properties.store.factory=org.neo4j.kernel.impl.InSolrPropertyNodeStoreFactory +external.properties.store.solr.zk=10.0.82.216:2181,10.0.82.217:2181,10.0.82.218:2181 +external.properties.store.solr.collection=panda-1300W + +#zookeeper.address=10.0.82.216:2181,10.0.82.217:2181 +#node.server.address=10.0.82.216:7685 +#localIpAddress=10.0.82.216 +#rpcPort=1224 diff --git a/itest/testdata/gnode0.conf b/itest/testdata/gnode0.conf new file mode 100644 index 00000000..a87289cc --- /dev/null +++ b/itest/testdata/gnode0.conf @@ -0,0 +1,32 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7685 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=0.0.0.0:7469 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 
+blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ + +#zookeeper.address=10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181 +zookeeper.address=10.0.82.216:2181 +node.server.address=0.0.0.0:7685 +localIpAddress=0.0.0.0 +rpc.port=1224 \ No newline at end of file diff --git a/testdata/neo4j.conf b/itest/testdata/gnode1.conf similarity index 70% rename from testdata/neo4j.conf rename to itest/testdata/gnode1.conf index 40a521d8..56403bf6 100644 --- a/testdata/neo4j.conf +++ b/itest/testdata/gnode1.conf @@ -1,10 +1,11 @@ dbms.security.auth_enabled=false dbms.connector.bolt.enabled=true dbms.connector.bolt.tls_level=OPTIONAL -dbms.connector.bolt.listen_address=:7687 +dbms.connector.bolt.listen_address=0.0.0.0:7686 dbms.connector.http.enabled=true -dbms.connector.http.listen_address=localhost:7474 +dbms.connector.http.listen_address=0.0.0.0:7470 +dbms.connector.https.enabled=false dbms.logs.http.enabled=true blob.plugins.conf=./cypher-plugins.xml @@ -23,3 +24,8 @@ blob.aipm.modules.dir=/usr/local/aipm/modules/ #dbms.active_database=testdb aipm.http.host.url=http://10.0.86.128:8081/ + +zookeeper.address=10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181 +node.server.address=10.0.87.7:7686 +localIpAddress=10.0.87.7 +rpcPort=1225 \ No newline at end of file diff --git a/itest/testdata/gnode2.conf b/itest/testdata/gnode2.conf new file mode 100644 index 00000000..40c95497 --- /dev/null +++ b/itest/testdata/gnode2.conf @@ -0,0 +1,31 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7685 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=0.0.0.0:7469 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ + +zookeeper.address=10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181 +node.server.address=10.0.87.9:7685 +localIpAddress=10.0.87.7 +rpcPort=1226 \ No newline at end of file diff --git a/itest/testdata/gnode3.conf b/itest/testdata/gnode3.conf new file mode 100644 index 00000000..502b84e8 --- /dev/null +++ b/itest/testdata/gnode3.conf @@ -0,0 +1,31 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7686 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=0.0.0.0:7470 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false 
+blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ + +zookeeper.address=10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181 +node.server.address=10.0.87.9:7686 +localIpAddress=10.0.87.7 +rpcPort=1227 \ No newline at end of file diff --git a/itest/testdata/localnode0.conf b/itest/testdata/localnode0.conf new file mode 100644 index 00000000..35319a86 --- /dev/null +++ b/itest/testdata/localnode0.conf @@ -0,0 +1,36 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7684 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=0.0.0.0:7468 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ + +zookeeper.address=10.0.82.216:2181 +localIpAddress=159.226.193.204 +node.server.address=159.226.193.204:7684 +rpc.port=1224 + +#external.properties.store.factory=cn.pandadb.externalprops.InSolrPropertyNodeStoreFactory +#external.properties.store.solr.zk=10.0.82.216:2181,10.0.82.217:2181,10.0.82.218:2181 +#external.properties.store.solr.collection=panda-1300W +#external.property.storage.enabled=true \ No newline at end of file diff --git a/testdata/lqd.jpeg b/itest/testdata/lqd.jpeg similarity index 100% rename from testdata/lqd.jpeg rename to itest/testdata/lqd.jpeg diff --git a/testdata/mayun1.jpeg b/itest/testdata/mayun1.jpeg similarity index 100% rename from testdata/mayun1.jpeg rename to itest/testdata/mayun1.jpeg diff --git a/testdata/mayun2.jpeg b/itest/testdata/mayun2.jpeg similarity index 100% rename from testdata/mayun2.jpeg rename to itest/testdata/mayun2.jpeg diff --git a/itest/testdata/neo4j.conf b/itest/testdata/neo4j.conf new file mode 100644 index 00000000..7fe8aa82 --- /dev/null +++ b/itest/testdata/neo4j.conf @@ -0,0 +1,39 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7687 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=localhost:7474 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ +external.properties.store.factory= org.neo4j.kernel.impl.InSolrPropertyNodeStoreFactory +external.properties.store.solr.zk=10.0.86.179:2181,10.0.87.45:2181,10.0.87.46:2181 +external.properties.store.solr.collection=graiphdb + +# ElasticSearch store +external.properties.store.factory= 
org.neo4j.kernel.impl.InElasticSearchPropertyNodeStoreFactory +external.properties.store.es.host=10.0.82.218 +external.properties.store.es.port=9200 +external.properties.store.es.schema=http +external.properties.store.es.scroll.size=1000 +external.properties.store.es.scroll.time.minutes=10 +external.properties.store.es.index=test-0119 +external.properties.store.es.type=nodes diff --git a/itest/testdata/performance-test.conf b/itest/testdata/performance-test.conf new file mode 100644 index 00000000..59f19596 --- /dev/null +++ b/itest/testdata/performance-test.conf @@ -0,0 +1,16 @@ +# test: neo4j/pandadb +test.db=pandadb +test.cyhper.path=/home/bigdata/pandadb-2019/itest/testdata/cyhper.txt +graph.data.path=/home/bigdata/panda_output/graph.db1 +log.file.dir=/home/bigdata/pandadb-2019/itest/test-logs/ + + +# ElasticSearch store +external.properties.store.factory=org.neo4j.kernel.impl.InElasticSearchPropertyNodeStoreFactory +external.properties.store.es.host=10.0.82.216 +external.properties.store.es.port=9200 +external.properties.store.es.schema=http +external.properties.store.es.scroll.size=1000 +external.properties.store.es.scroll.time.minutes=10 +external.properties.store.es.index=pandadb +external.properties.store.es.type=nodes diff --git a/testdata/test.csv b/itest/testdata/test.csv similarity index 100% rename from testdata/test.csv rename to itest/testdata/test.csv diff --git a/testdata/test.png b/itest/testdata/test.png similarity index 100% rename from testdata/test.png rename to itest/testdata/test.png diff --git a/testdata/test.wav b/itest/testdata/test.wav similarity index 100% rename from testdata/test.wav rename to itest/testdata/test.wav diff --git a/testdata/test1.png b/itest/testdata/test1.png similarity index 100% rename from testdata/test1.png rename to itest/testdata/test1.png diff --git a/testdata/test2.jpg b/itest/testdata/test2.jpg similarity index 100% rename from testdata/test2.jpg rename to itest/testdata/test2.jpg diff --git a/java-driver/pom.xml b/java-driver/pom.xml new file mode 100644 index 00000000..4e0c9ccc --- /dev/null +++ b/java-driver/pom.xml @@ -0,0 +1,79 @@ + + + + cn.pandadb + parent + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + java-driver + + + + cn.pandadb + blob-commons + ${pandadb.version} + compile + + + cn.pandadb + network-commons + ${pandadb.version} + compile + + + org.neo4j.driver + neo4j-java-driver + 2.0.0-alpha03 + + + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + scala-compile-first + process-resources + + add-source + compile + + + + + + maven-assembly-plugin + + + + + + + + + jar-with-dependencies + + + + + make-assembly + package + + single + + + + + + + + \ No newline at end of file diff --git a/src/graiph-driver/java/org/neo4j/driver/GraphDatabase.java b/java-driver/src/main/java/org/neo4j/driver/GraphDatabase.java similarity index 87% rename from src/graiph-driver/java/org/neo4j/driver/GraphDatabase.java rename to java-driver/src/main/java/org/neo4j/driver/GraphDatabase.java index 07b6207c..dd17407b 100644 --- a/src/graiph-driver/java/org/neo4j/driver/GraphDatabase.java +++ b/java-driver/src/main/java/org/neo4j/driver/GraphDatabase.java @@ -20,6 +20,7 @@ import java.net.URI; +import cn.pandadb.driver.PandaDriver; import org.neo4j.driver.internal.DriverFactory; import org.neo4j.driver.internal.cluster.RoutingSettings; import org.neo4j.driver.internal.retry.RetrySettings; @@ -116,6 +117,12 @@ public static Driver driver( URI uri, AuthToken authToken ) */ public static Driver driver( String uri, AuthToken authToken, Config 
config ) { + //NOTE: pandadb + if(uri.startsWith("panda://")) { + return PandaDriver.create(uri, authToken, config); + } + //NOTE + return driver( URI.create( uri ), authToken, config ); } @@ -154,19 +161,40 @@ public static Driver routingDriver( Iterable routingUris, AuthToken authTok for ( URI uri : routingUris ) { + final Driver driver = driver( uri, authToken, config ); try { - return driver( uri, authToken, config ); + driver.verifyConnectivity(); + return driver; } catch ( ServiceUnavailableException e ) { log.warn( "Unable to create routing driver for URI: " + uri, e ); + closeDriver( driver, uri, log ); + } + catch ( Throwable e ) + { + // for any other errors, we first close the driver and then rethrow the original error out. + closeDriver( driver, uri, log ); + throw e; } } throw new ServiceUnavailableException( "Failed to discover an available server" ); } + private static void closeDriver( Driver driver, URI uri, Logger log ) + { + try + { + driver.close(); + } + catch ( Throwable closeError ) + { + log.warn( "Unable to close driver towards URI: " + uri, closeError ); + } + } + private static void assertRoutingUris( Iterable uris ) { for ( URI uri : uris ) diff --git a/src/graiph-driver/java/org/neo4j/driver/Value.java b/java-driver/src/main/java/org/neo4j/driver/Value.java similarity index 99% rename from src/graiph-driver/java/org/neo4j/driver/Value.java rename to java-driver/src/main/java/org/neo4j/driver/Value.java index fc452404..137b9d53 100644 --- a/src/graiph-driver/java/org/neo4j/driver/Value.java +++ b/java-driver/src/main/java/org/neo4j/driver/Value.java @@ -44,7 +44,7 @@ import org.neo4j.driver.util.Experimental; import java.util.function.Function; import org.neo4j.driver.util.Immutable; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; /** * A unit of data that adheres to the Neo4j type system. 
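The hunk above shows the only dispatch point added to GraphDatabase.driver(): any URI beginning with "panda://" is handed to PandaDriver.create, while every other scheme still goes through the stock Neo4j driver factory, so existing Bolt client code keeps working unchanged. The following is a minimal usage sketch, not part of the patch; the ZooKeeper address, Bolt address and credentials are illustrative values taken from the test configurations elsewhere in this patch.

import org.neo4j.driver.{AuthTokens, GraphDatabase}

object DriverCreationSketch {
  def main(args: Array[String]): Unit = {
    // A panda:// URI is intercepted by the patched GraphDatabase and served by PandaDriver,
    // which discovers cluster members through the ZooKeeper address embedded in the URI.
    val pandaDriver = GraphDatabase.driver("panda://10.0.82.216:2181/db", AuthTokens.basic("", ""))

    // Any other scheme (e.g. bolt://) falls through to the original Neo4j code path.
    val boltDriver = GraphDatabase.driver("bolt://localhost:7687", AuthTokens.basic("neo4j", "bigdata"))

    val session = pandaDriver.session()
    try {
      // The session API is the same for both drivers.
      println(session.run("MATCH (n) RETURN count(n)").single().get(0))
    } finally {
      session.close()
      pandaDriver.close()
      boltDriver.close()
    }
  }
}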
diff --git a/src/graiph-driver/java/org/neo4j/driver/Values.java b/java-driver/src/main/java/org/neo4j/driver/Values.java similarity index 99% rename from src/graiph-driver/java/org/neo4j/driver/Values.java rename to java-driver/src/main/java/org/neo4j/driver/Values.java index ff379a91..2ec19853 100644 --- a/src/graiph-driver/java/org/neo4j/driver/Values.java +++ b/java-driver/src/main/java/org/neo4j/driver/Values.java @@ -50,7 +50,7 @@ import org.neo4j.driver.types.Point; import org.neo4j.driver.types.Relationship; import org.neo4j.driver.types.TypeSystem; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import java.util.function.Function; diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ByteBufInput.java b/java-driver/src/main/java/org/neo4j/driver/internal/async/inbound/ByteBufInput.java similarity index 96% rename from src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ByteBufInput.java rename to java-driver/src/main/java/org/neo4j/driver/internal/async/inbound/ByteBufInput.java index 5cab52fe..52de5e25 100644 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ByteBufInput.java +++ b/java-driver/src/main/java/org/neo4j/driver/internal/async/inbound/ByteBufInput.java @@ -18,7 +18,7 @@ */ package org.neo4j.driver.internal.async.inbound; -import io.netty.buffer.ByteBuf; +import org.neo4j.driver.internal.shaded.io.netty.buffer.ByteBuf; import org.neo4j.driver.internal.packstream.PackInput; diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/InboundMessageHandler.java b/java-driver/src/main/java/org/neo4j/driver/internal/async/inbound/InboundMessageHandler.java similarity index 87% rename from src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/InboundMessageHandler.java rename to java-driver/src/main/java/org/neo4j/driver/internal/async/inbound/InboundMessageHandler.java index 3fbb4eff..11317699 100644 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/InboundMessageHandler.java +++ b/java-driver/src/main/java/org/neo4j/driver/internal/async/inbound/InboundMessageHandler.java @@ -18,17 +18,17 @@ */ package org.neo4j.driver.internal.async.inbound; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.handler.codec.DecoderException; +import org.neo4j.driver.internal.shaded.io.netty.buffer.ByteBuf; +import org.neo4j.driver.internal.shaded.io.netty.channel.ChannelHandlerContext; +import org.neo4j.driver.internal.shaded.io.netty.channel.SimpleChannelInboundHandler; +import org.neo4j.driver.internal.shaded.io.netty.handler.codec.DecoderException; import org.neo4j.driver.internal.logging.ChannelActivityLogger; import org.neo4j.driver.internal.messaging.MessageFormat; import org.neo4j.driver.Logger; import org.neo4j.driver.Logging; -import static io.netty.buffer.ByteBufUtil.hexDump; +import static org.neo4j.driver.internal.shaded.io.netty.buffer.ByteBufUtil.hexDump; import static java.util.Objects.requireNonNull; import static org.neo4j.driver.internal.async.connection.ChannelAttributes.messageDispatcher; diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/ValueUnpackerV1.java b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v1/ValueUnpackerV1.java similarity index 100% rename from src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/ValueUnpackerV1.java rename to 
java-driver/src/main/java/org/neo4j/driver/internal/messaging/v1/ValueUnpackerV1.java diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/ValuePackerV2.java b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v2/ValuePackerV2.java similarity index 84% rename from src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/ValuePackerV2.java rename to java-driver/src/main/java/org/neo4j/driver/internal/messaging/v2/ValuePackerV2.java index 6b8c9ae5..6d7500ae 100644 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/ValuePackerV2.java +++ b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v2/ValuePackerV2.java @@ -32,6 +32,7 @@ import org.neo4j.driver.internal.messaging.v1.ValuePackerV1; import org.neo4j.driver.internal.packstream.PackOutput; import org.neo4j.driver.internal.types.TypeConstructor; +import org.neo4j.driver.internal.util.BoltClientBlobIO; import org.neo4j.driver.internal.value.InternalValue; import org.neo4j.driver.types.IsoDuration; import org.neo4j.driver.types.Point; @@ -66,6 +67,14 @@ public ValuePackerV2( PackOutput output ) protected void packInternalValue( InternalValue value ) throws IOException { TypeConstructor typeConstructor = value.typeConstructor(); + + //NOTE: blob + if (TypeConstructor.BLOB == typeConstructor) { + BoltClientBlobIO.packBlob(value.asBlob(), packer); + return; + } + //NOTE + switch ( typeConstructor ) { case DATE: @@ -96,7 +105,7 @@ protected void packInternalValue( InternalValue value ) throws IOException private void packDate( LocalDate localDate ) throws IOException { - packer.packStructHeader( DATE_STRUCT_SIZE, DATE ); + packer.packStructHeader( MessageFormatV2.DATE_STRUCT_SIZE, MessageFormatV2.DATE ); packer.pack( localDate.toEpochDay() ); } @@ -105,14 +114,14 @@ private void packTime( OffsetTime offsetTime ) throws IOException long nanoOfDayLocal = offsetTime.toLocalTime().toNanoOfDay(); int offsetSeconds = offsetTime.getOffset().getTotalSeconds(); - packer.packStructHeader( TIME_STRUCT_SIZE, TIME ); + packer.packStructHeader( MessageFormatV2.TIME_STRUCT_SIZE, MessageFormatV2.TIME ); packer.pack( nanoOfDayLocal ); packer.pack( offsetSeconds ); } private void packLocalTime( LocalTime localTime ) throws IOException { - packer.packStructHeader( LOCAL_TIME_STRUCT_SIZE, LOCAL_TIME ); + packer.packStructHeader( MessageFormatV2.LOCAL_TIME_STRUCT_SIZE, MessageFormatV2.LOCAL_TIME ); packer.pack( localTime.toNanoOfDay() ); } @@ -121,7 +130,7 @@ private void packLocalDateTime( LocalDateTime localDateTime ) throws IOException long epochSecondUtc = localDateTime.toEpochSecond( UTC ); int nano = localDateTime.getNano(); - packer.packStructHeader( LOCAL_DATE_TIME_STRUCT_SIZE, LOCAL_DATE_TIME ); + packer.packStructHeader( MessageFormatV2.LOCAL_DATE_TIME_STRUCT_SIZE, MessageFormatV2.LOCAL_DATE_TIME ); packer.pack( epochSecondUtc ); packer.pack( nano ); } @@ -136,7 +145,7 @@ private void packZonedDateTime( ZonedDateTime zonedDateTime ) throws IOException { int offsetSeconds = ((ZoneOffset) zone).getTotalSeconds(); - packer.packStructHeader( DATE_TIME_STRUCT_SIZE, DATE_TIME_WITH_ZONE_OFFSET ); + packer.packStructHeader( MessageFormatV2.DATE_TIME_STRUCT_SIZE, MessageFormatV2.DATE_TIME_WITH_ZONE_OFFSET ); packer.pack( epochSecondLocal ); packer.pack( nano ); packer.pack( offsetSeconds ); @@ -145,7 +154,7 @@ private void packZonedDateTime( ZonedDateTime zonedDateTime ) throws IOException { String zoneId = zone.getId(); - packer.packStructHeader( DATE_TIME_STRUCT_SIZE, DATE_TIME_WITH_ZONE_ID ); + 
packer.packStructHeader( MessageFormatV2.DATE_TIME_STRUCT_SIZE, MessageFormatV2.DATE_TIME_WITH_ZONE_ID ); packer.pack( epochSecondLocal ); packer.pack( nano ); packer.pack( zoneId ); @@ -154,7 +163,7 @@ private void packZonedDateTime( ZonedDateTime zonedDateTime ) throws IOException private void packDuration( IsoDuration duration ) throws IOException { - packer.packStructHeader( DURATION_TIME_STRUCT_SIZE, DURATION ); + packer.packStructHeader( MessageFormatV2.DURATION_TIME_STRUCT_SIZE, MessageFormatV2.DURATION ); packer.pack( duration.months() ); packer.pack( duration.days() ); packer.pack( duration.seconds() ); @@ -179,7 +188,7 @@ else if ( point instanceof InternalPoint3D ) private void packPoint2D( Point point ) throws IOException { - packer.packStructHeader( POINT_2D_STRUCT_SIZE, POINT_2D_STRUCT_TYPE ); + packer.packStructHeader( MessageFormatV2.POINT_2D_STRUCT_SIZE, MessageFormatV2.POINT_2D_STRUCT_TYPE ); packer.pack( point.srid() ); packer.pack( point.x() ); packer.pack( point.y() ); @@ -187,7 +196,7 @@ private void packPoint2D( Point point ) throws IOException private void packPoint3D( Point point ) throws IOException { - packer.packStructHeader( POINT_3D_STRUCT_SIZE, POINT_3D_STRUCT_TYPE ); + packer.packStructHeader( MessageFormatV2.POINT_3D_STRUCT_SIZE, MessageFormatV2.POINT_3D_STRUCT_TYPE ); packer.pack( point.srid() ); packer.pack( point.x() ); packer.pack( point.y() ); diff --git a/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v2/ValueUnpackerV2.java b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v2/ValueUnpackerV2.java new file mode 100644 index 00000000..82e57ad4 --- /dev/null +++ b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v2/ValueUnpackerV2.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2002-2019 "Neo4j," + * Neo4j Sweden AB [http://neo4j.com] + * + * This file is part of Neo4j. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.neo4j.driver.internal.messaging.v2; + +import org.neo4j.driver.Value; +import org.neo4j.driver.internal.messaging.v1.ValueUnpackerV1; +import org.neo4j.driver.internal.packstream.PackInput; +import org.neo4j.driver.internal.types.TypeConstructor; +import org.neo4j.driver.internal.util.BoltClientBlobIO; + +import java.io.IOException; +import java.time.*; + +import static java.time.ZoneOffset.UTC; +import static org.neo4j.driver.Values.*; +import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.*; + +public class ValueUnpackerV2 extends ValueUnpackerV1 { + public ValueUnpackerV2(PackInput input) { + super(input); + } + + //NOTE: blob support + protected Value unpack() throws IOException { + Value blobValue = BoltClientBlobIO.unpackBlob(unpacker); + if (blobValue != null) + return blobValue; + + return super.unpack(); + } + //NOTE + + @Override + protected Value unpackStruct(long size, byte type) throws IOException { + switch (type) { + case DATE: + ensureCorrectStructSize(TypeConstructor.DATE, DATE_STRUCT_SIZE, size); + return unpackDate(); + case TIME: + ensureCorrectStructSize(TypeConstructor.TIME, TIME_STRUCT_SIZE, size); + return unpackTime(); + case LOCAL_TIME: + ensureCorrectStructSize(TypeConstructor.LOCAL_TIME, LOCAL_TIME_STRUCT_SIZE, size); + return unpackLocalTime(); + case LOCAL_DATE_TIME: + ensureCorrectStructSize(TypeConstructor.LOCAL_DATE_TIME, LOCAL_DATE_TIME_STRUCT_SIZE, size); + return unpackLocalDateTime(); + case DATE_TIME_WITH_ZONE_OFFSET: + ensureCorrectStructSize(TypeConstructor.DATE_TIME, DATE_TIME_STRUCT_SIZE, size); + return unpackDateTimeWithZoneOffset(); + case DATE_TIME_WITH_ZONE_ID: + ensureCorrectStructSize(TypeConstructor.DATE_TIME, DATE_TIME_STRUCT_SIZE, size); + return unpackDateTimeWithZoneId(); + case DURATION: + ensureCorrectStructSize(TypeConstructor.DURATION, DURATION_TIME_STRUCT_SIZE, size); + return unpackDuration(); + case POINT_2D_STRUCT_TYPE: + ensureCorrectStructSize(TypeConstructor.POINT, POINT_2D_STRUCT_SIZE, size); + return unpackPoint2D(); + case POINT_3D_STRUCT_TYPE: + ensureCorrectStructSize(TypeConstructor.POINT, POINT_3D_STRUCT_SIZE, size); + return unpackPoint3D(); + default: + return super.unpackStruct(size, type); + } + } + + private Value unpackDate() throws IOException { + long epochDay = unpacker.unpackLong(); + return value(LocalDate.ofEpochDay(epochDay)); + } + + private Value unpackTime() throws IOException { + long nanoOfDayLocal = unpacker.unpackLong(); + int offsetSeconds = Math.toIntExact(unpacker.unpackLong()); + + LocalTime localTime = LocalTime.ofNanoOfDay(nanoOfDayLocal); + ZoneOffset offset = ZoneOffset.ofTotalSeconds(offsetSeconds); + return value(OffsetTime.of(localTime, offset)); + } + + private Value unpackLocalTime() throws IOException { + long nanoOfDayLocal = unpacker.unpackLong(); + return value(LocalTime.ofNanoOfDay(nanoOfDayLocal)); + } + + private Value unpackLocalDateTime() throws IOException { + long epochSecondUtc = unpacker.unpackLong(); + int nano = Math.toIntExact(unpacker.unpackLong()); + return value(LocalDateTime.ofEpochSecond(epochSecondUtc, nano, UTC)); + } + + private Value unpackDateTimeWithZoneOffset() throws IOException { + long epochSecondLocal = unpacker.unpackLong(); + int nano = Math.toIntExact(unpacker.unpackLong()); + int offsetSeconds = Math.toIntExact(unpacker.unpackLong()); + return value(newZonedDateTime(epochSecondLocal, nano, ZoneOffset.ofTotalSeconds(offsetSeconds))); + } + + private Value unpackDateTimeWithZoneId() throws IOException { + long 
epochSecondLocal = unpacker.unpackLong(); + int nano = Math.toIntExact(unpacker.unpackLong()); + String zoneIdString = unpacker.unpackString(); + return value(newZonedDateTime(epochSecondLocal, nano, ZoneId.of(zoneIdString))); + } + + private Value unpackDuration() throws IOException { + long months = unpacker.unpackLong(); + long days = unpacker.unpackLong(); + long seconds = unpacker.unpackLong(); + int nanoseconds = Math.toIntExact(unpacker.unpackLong()); + return isoDuration(months, days, seconds, nanoseconds); + } + + private Value unpackPoint2D() throws IOException { + int srid = Math.toIntExact(unpacker.unpackLong()); + double x = unpacker.unpackDouble(); + double y = unpacker.unpackDouble(); + return point(srid, x, y); + } + + private Value unpackPoint3D() throws IOException { + int srid = Math.toIntExact(unpacker.unpackLong()); + double x = unpacker.unpackDouble(); + double y = unpacker.unpackDouble(); + double z = unpacker.unpackDouble(); + return point(srid, x, y, z); + } + + private static ZonedDateTime newZonedDateTime(long epochSecondLocal, long nano, ZoneId zoneId) { + Instant instant = Instant.ofEpochSecond(epochSecondLocal, nano); + LocalDateTime localDateTime = LocalDateTime.ofInstant(instant, UTC); + return ZonedDateTime.of(localDateTime, zoneId); + } +} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/MessageWriterV3.java b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v3/MessageWriterV3.java similarity index 92% rename from src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/MessageWriterV3.java rename to java-driver/src/main/java/org/neo4j/driver/internal/messaging/v3/MessageWriterV3.java index 2706d5ad..1a45408d 100644 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/MessageWriterV3.java +++ b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v3/MessageWriterV3.java @@ -20,6 +20,8 @@ import java.util.Map; +import cn.pandadb.blob.BlobMessageSignature; +import org.neo4j.driver.internal.GetBlobMessageEncoder; import org.neo4j.driver.internal.messaging.AbstractMessageWriter; import org.neo4j.driver.internal.messaging.MessageEncoder; import org.neo4j.driver.internal.messaging.encode.BeginMessageEncoder; @@ -61,6 +63,10 @@ private static Map buildEncoders() result.put( DiscardAllMessage.SIGNATURE, new DiscardAllMessageEncoder() ); result.put( PullAllMessage.SIGNATURE, new PullAllMessageEncoder() ); + //NOTE: GetBlobMessageEncoder + result.put(BlobMessageSignature.SIGNATURE_GET_BLOB(), new GetBlobMessageEncoder() ); // new + //NOTE + result.put( BeginMessage.SIGNATURE, new BeginMessageEncoder() ); result.put( CommitMessage.SIGNATURE, new CommitMessageEncoder() ); result.put( RollbackMessage.SIGNATURE, new RollbackMessageEncoder() ); diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/MessageWriterV4.java b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v4/MessageWriterV4.java similarity index 92% rename from src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/MessageWriterV4.java rename to java-driver/src/main/java/org/neo4j/driver/internal/messaging/v4/MessageWriterV4.java index 28d6e99f..940396b3 100644 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/MessageWriterV4.java +++ b/java-driver/src/main/java/org/neo4j/driver/internal/messaging/v4/MessageWriterV4.java @@ -20,6 +20,8 @@ import java.util.Map; +import cn.pandadb.blob.BlobMessageSignature; +import org.neo4j.driver.internal.GetBlobMessageEncoder; import 
org.neo4j.driver.internal.messaging.AbstractMessageWriter; import org.neo4j.driver.internal.messaging.MessageEncoder; import org.neo4j.driver.internal.messaging.encode.BeginMessageEncoder; @@ -61,6 +63,10 @@ private static Map buildEncoders() result.put( DiscardMessage.SIGNATURE, new DiscardMessageEncoder() ); // new result.put( PullMessage.SIGNATURE, new PullMessageEncoder() ); // new + //NOTE: GetBlobMessageEncoder + result.put(BlobMessageSignature.SIGNATURE_GET_BLOB(), new GetBlobMessageEncoder() ); // new + //NOTE + result.put( BeginMessage.SIGNATURE, new BeginMessageEncoder() ); result.put( CommitMessage.SIGNATURE, new CommitMessageEncoder() ); result.put( RollbackMessage.SIGNATURE, new RollbackMessageEncoder() ); diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/types/TypeConstructor.java b/java-driver/src/main/java/org/neo4j/driver/internal/types/TypeConstructor.java similarity index 100% rename from src/graiph-driver/java/org/neo4j/driver/internal/types/TypeConstructor.java rename to java-driver/src/main/java/org/neo4j/driver/internal/types/TypeConstructor.java diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/ValueAdapter.java b/java-driver/src/main/java/org/neo4j/driver/internal/value/ValueAdapter.java similarity index 99% rename from src/graiph-driver/java/org/neo4j/driver/internal/value/ValueAdapter.java rename to java-driver/src/main/java/org/neo4j/driver/internal/value/ValueAdapter.java index a52a7e0b..3d393d9e 100644 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/ValueAdapter.java +++ b/java-driver/src/main/java/org/neo4j/driver/internal/value/ValueAdapter.java @@ -41,7 +41,7 @@ import org.neo4j.driver.types.Point; import org.neo4j.driver.types.Relationship; import org.neo4j.driver.types.Type; -import cn.graiph.blob.Blob; +import cn.pandadb.blob.Blob; import java.util.function.Function; diff --git a/java-driver/src/main/scala/cn/pandadb/driver/PandaDriver.scala b/java-driver/src/main/scala/cn/pandadb/driver/PandaDriver.scala new file mode 100644 index 00000000..1bef857d --- /dev/null +++ b/java-driver/src/main/scala/cn/pandadb/driver/PandaDriver.scala @@ -0,0 +1,216 @@ +package cn.pandadb.driver + +import java.io.IOException +import java.net.URI +import java.security.GeneralSecurityException +import java.util.Collections +import java.{security, util} +import java.util.concurrent.{CompletableFuture, CompletionStage} + +import cn.pandadb.cypherplus.utils.CypherPlusUtils +import cn.pandadb.network.{ClusterClient, NodeAddress, ZookeeperBasedClusterClient} +import org.apache.commons.lang3.NotImplementedException +import org.neo4j.driver.Config.TrustStrategy +import org.neo4j.driver.{Transaction, Value, _} +import org.neo4j.driver.async.{AsyncSession, AsyncStatementRunner, AsyncTransaction, AsyncTransactionWork, StatementResultCursor} +import org.neo4j.driver.exceptions.ClientException +import org.neo4j.driver.internal.async.connection.BootstrapFactory +import org.neo4j.driver.internal.cluster.{RoutingContext, RoutingSettings} +import org.neo4j.driver.internal.{AbstractStatementRunner, BoltServerAddress, DirectConnectionProvider, DriverFactory, SessionConfig, SessionFactory, SessionFactoryImpl} +import org.neo4j.driver.internal.metrics.{InternalMetricsProvider, MetricsProvider} +import org.neo4j.driver.internal.retry.{ExponentialBackoffRetryLogic, RetryLogic, RetrySettings} +import org.neo4j.driver.internal.security.SecurityPlan +import org.neo4j.driver.internal.spi.ConnectionProvider +import 
org.neo4j.driver.internal.types.InternalTypeSystem +import org.neo4j.driver.internal.util.{Clock, Extract, Futures} +import org.neo4j.driver.internal.value.MapValue +import org.neo4j.driver.reactive.{RxSession, RxStatementResult, RxTransaction, RxTransactionWork} +import org.neo4j.driver.types.TypeSystem +import org.reactivestreams.Publisher + +import scala.collection.JavaConversions +import cn.pandadb.driver.PandaTransaction + +import scala.util.matching.Regex + +/** + * Created by bluejoe on 2019/11/21. + */ +object PandaDriver { + def create(uri: String, authToken: AuthToken, config: Config): Driver = { + new PandaDriver(uri, authToken, config) + } +} + +class PandaDriver(uri: String, authToken: AuthToken, config: Config) extends Driver { + val clusterClient: ClusterClient = createClusterClient(uri); + // val defaultSessionConfig = new SessionConfig() + val defaultSessionConfig = SessionConfig.empty() + override def closeAsync(): CompletionStage[Void] = { + //TODO + new CompletableFuture[Void](); + } + + override def session(): Session = session(defaultSessionConfig) + + override def session(sessionConfig: SessionConfig): Session = new PandaSession(sessionConfig, clusterClient); + + override def defaultTypeSystem(): TypeSystem = InternalTypeSystem.TYPE_SYSTEM + + override def rxSession(): RxSession = { + this.rxSession(defaultSessionConfig) + } + + override def rxSession(sessionConfig: SessionConfig): RxSession = { + throw new NotImplementedException("rxSession") + } + + /** + * verifyConnectivityAsync and verifyConnectivity is not right , because uri is zkString + */ + + override def verifyConnectivityAsync(): CompletionStage[Void] = { + throw new NotImplementedException("verifyConnectivityAsync") + } + + override def verifyConnectivity(): Unit = { + Futures.blockingGet(this.verifyConnectivityAsync()) + } + + override def metrics(): Metrics = { + createDriverMetrics(config, this.createClock()).metrics() + } + private def createDriverMetrics(config: Config, clock: Clock ): MetricsProvider = { + if (config.isMetricsEnabled()) new InternalMetricsProvider(clock) else MetricsProvider.METRICS_DISABLED_PROVIDER + } + private def createClock(): Clock = { + Clock.SYSTEM + } + + override def asyncSession(): AsyncSession = { + this.asyncSession(defaultSessionConfig) + } + + override def asyncSession(sessionConfig: SessionConfig): AsyncSession = { + throw new NotImplementedException("asyncSession") + } + + override def close(): Unit = { + + } + + //wait to finish + override def isEncrypted: Boolean = { + throw new NotImplementedException("isEncrypted") + } + + private def createClusterClient(uri: String): ClusterClient = { + //scalastyle:off + val pattern = new Regex("((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?):[0-9]{1,5}") + val zkString = (pattern findAllIn uri).mkString(",") + new ZookeeperBasedClusterClient(zkString) + } +} + +class PandaSession(sessionConfig: SessionConfig, clusterOperator: ClusterClient) extends Session { + + var session: Session = null + var readDriver: Driver = null + var writeDriver : Driver = null + private def getSession(isWriteStatement: Boolean): Session = { + if (!(this.session==null)) this.session.close() + if (isWriteStatement) { + if (this.writeDriver==null) this.writeDriver = SelectNode.getDriver(isWriteStatement, clusterOperator) + this.session = this.writeDriver.session(sessionConfig) + } else { + if (this.readDriver==null) this.readDriver = SelectNode.getDriver(isWriteStatement, clusterOperator) + this.session = 
this.readDriver.session(sessionConfig) + } + this.session + } + + override def writeTransaction[T](work: TransactionWork[T]): T = { + this.writeTransaction(work, TransactionConfig.empty()) + } + + + override def writeTransaction[T](work: TransactionWork[T], config: TransactionConfig): T = { + getSession(true).writeTransaction(work, config) + } + + + override def readTransaction[T](work: TransactionWork[T]): T = { + this.readTransaction(work, TransactionConfig.empty()) + } + + override def readTransaction[T](work: TransactionWork[T], config: TransactionConfig): T = { + getSession(false).readTransaction(work, config) + } + + override def run(statement: String, config: TransactionConfig): StatementResult = { + this.run(statement, Collections.emptyMap(), config) + } + + override def run(statement: String, parameters: util.Map[String, AnyRef], config: TransactionConfig): StatementResult = { + this.run(new Statement(statement, parameters), config) + } + + override def run(statement: Statement, config: TransactionConfig): StatementResult = { + val tempState = statement.text().toLowerCase() + val isWriteStatement = CypherPlusUtils.isWriteStatement(tempState) + getSession(isWriteStatement) + this.session.run(statement, config) + } + + override def close(): Unit = { + if (!(this.session == null)) session.close() + if (!(this.writeDriver == null)) this.writeDriver.close() + if (!(this.readDriver == null)) this.readDriver.close() + } + + override def lastBookmark(): String = { + session.lastBookmark() + } + + override def reset(): Unit = { + session.reset() + } + + override def beginTransaction(): Transaction = { + this.beginTransaction(TransactionConfig.empty()) + } + + override def beginTransaction(config: TransactionConfig): Transaction = { + /*isTransaction = true + this.config = config + this.transaction*/ + new PandaTransaction(sessionConfig, config, clusterOperator) + } + + override def run(statementTemplate: String, parameters: Value): StatementResult = { + this.run(new Statement(statementTemplate, parameters)) + } + + override def run(statementTemplate: String, statementParameters: util.Map[String, AnyRef]): StatementResult = { + this.run(statementTemplate, AbstractStatementRunner.parameters(statementParameters)) + } + + override def run(statementTemplate: String, statementParameters: Record): StatementResult = { + //session.run(statementTemplate, statementParameters) AbstractStatementRunner + //this.run(statementTemplate, parameters(statementParameters)) + this.run(statementTemplate, AbstractStatementRunner.parameters(statementParameters)) + } + + override def run(statementTemplate: String): StatementResult = { + this.run(statementTemplate, Values.EmptyMap) + } + + override def run(statement: Statement): StatementResult = { + //session.run(statement) + this.run(statement, TransactionConfig.empty()) + } + + override def isOpen: Boolean = { + session.isOpen + } +} \ No newline at end of file diff --git a/java-driver/src/main/scala/cn/pandadb/driver/PandaTransaction.scala b/java-driver/src/main/scala/cn/pandadb/driver/PandaTransaction.scala new file mode 100644 index 00000000..6dde86e3 --- /dev/null +++ b/java-driver/src/main/scala/cn/pandadb/driver/PandaTransaction.scala @@ -0,0 +1,96 @@ +package cn.pandadb.driver + +import java.util + +import cn.pandadb.cypherplus.utils.CypherPlusUtils +import cn.pandadb.network.{ClusterClient, NodeAddress} +import org.neo4j.driver.internal.{AbstractStatementRunner, SessionConfig} +import org.neo4j.driver.{AuthTokens, Driver, GraphDatabase, Record, Session, 
Statement, StatementResult, StatementRunner, Transaction, TransactionConfig, Value, Values} +import scala.collection.mutable.ArrayBuffer + +/** + * @Author: codeBabyLin + * @Description: + * @Date: Created in 9:06 2019/11/26 + * @Modified By: + */ + +class PandaTransaction(sessionConfig: SessionConfig, config: TransactionConfig, clusterOperator: ClusterClient) extends Transaction{ + + var transactionArray: ArrayBuffer[Transaction] = ArrayBuffer[Transaction]() // saves transactions + var sessionArray: ArrayBuffer[Session] = ArrayBuffer[Session]() // saves sessions + + var transaction: Transaction = _ + var writeTransaction: Transaction = _ + var session: Session = _ + var readDriver: Driver = _ + var writeDriver : Driver = _ + // Rule 1: one session, one transaction. + // Rule 2: once a session is closed, its transaction no longer works. + private def getSession(isWriteStatement: Boolean): Session = { + //if (!(this.session==null)) this.session.close() // the session backing this transaction must not be closed here + if (isWriteStatement) { + if (this.writeDriver==null) this.writeDriver = SelectNode.getDriver(isWriteStatement, clusterOperator) + this.session = this.writeDriver.session(sessionConfig) + } else { + if (this.readDriver==null) this.readDriver = SelectNode.getDriver(isWriteStatement, clusterOperator) + this.session = this.readDriver.session(sessionConfig) + } + this.session + } + private def getTransactionReady(isWriteStatement: Boolean): Transaction = { + if (!(this.writeTransaction==null)) this.transaction = this.writeTransaction // reuse the write transaction + else { + this.session = getSession(isWriteStatement) + this.transaction = session.beginTransaction(config) + if(isWriteStatement) this.writeTransaction = this.transaction + this.sessionArray += this.session + this.transactionArray += this.transaction + } + this.transaction + + } + + override def success(): Unit = { + if (this.transactionArray.nonEmpty) this.transactionArray.foreach(trans => trans.success()) + } + + override def failure(): Unit = { + if (this.transactionArray.nonEmpty) this.transactionArray.foreach(trans => trans.failure()) + } + + override def close(): Unit = { + if (this.transactionArray.nonEmpty) this.transactionArray.foreach(trans => trans.close()) + if (this.sessionArray.nonEmpty) this.sessionArray.foreach(sess => sess.close()) + if (!(this.writeDriver == null)) this.writeDriver.close() + if (!(this.readDriver == null)) this.readDriver.close() + } + + override def run(s: String, value: Value): StatementResult = { + this.run(new Statement(s, value)) + } + + override def run(s: String, map: util.Map[String, AnyRef]): StatementResult = { + this.run(s, AbstractStatementRunner.parameters(map)) + } + + override def run(s: String, record: Record): StatementResult = { + this.run(s, AbstractStatementRunner.parameters(record)) + } + + override def run(s: String): StatementResult = { + this.run(s, Values.EmptyMap) + } + + override def run(statement: Statement): StatementResult = { + // the transaction must not be closed until close() is called + val tempState = statement.text().toLowerCase() + val isWriteStatement = CypherPlusUtils.isWriteStatement(tempState) + getTransactionReady(isWriteStatement) + this.transaction.run(statement) + } + + override def isOpen: Boolean = { + if (!(this.transaction == null)) this.transaction.isOpen else true + } +} diff --git a/java-driver/src/main/scala/cn/pandadb/driver/SelectNode.scala b/java-driver/src/main/scala/cn/pandadb/driver/SelectNode.scala new file mode 100644 index 00000000..44ef806f --- /dev/null +++ 
b/java-driver/src/main/scala/cn/pandadb/driver/SelectNode.scala @@ -0,0 +1,131 @@ +package cn.pandadb.driver + +import cn.pandadb.network.{ClusterClient, NodeAddress} +import org.neo4j.driver.{AuthTokens, Driver, GraphDatabase} + +import scala.collection.mutable + +/** + * @Author: codeBabyLin + * @Description: + * @Date: Created at 20:50 2019/11/27 + * @Modified By: + */ +trait Strategy{ + +} +case class RANDOM_PICK() extends Strategy{ + +} +case class ROBIN_ROUND() extends Strategy{ + +} +case class WORK_TIME_PICK() extends Strategy{ + +} +case class DEFAULT_PICK() extends Strategy{ + +} + +case class EASY_ROUND() extends Strategy{ + +} + +object SelectNode { + + val RONDOM_POLICY = 0 + val _POLICY = 1 + val robinArray = mutable.Map[NodeAddress, Long]() + private var policy: Strategy = null + private var index = 0 + + private def getWriteNode(clusterOperator: ClusterClient): NodeAddress = { + clusterOperator.getWriteMasterNode() + //policyDefault + } + + private def policyRandom(clusterOperator: ClusterClient): NodeAddress = { + + val nodeLists = clusterOperator.getAllNodes().toList + val index = (new util.Random).nextInt(nodeLists.length) + nodeLists(index) + + } + + private def policyRobinRound(clusterOperator: ClusterClient): NodeAddress = { + if (robinArray.size == 0) { + clusterOperator.getAllNodes().foreach(node => robinArray += node -> 0) + } + val list = robinArray.toList + val sorted = list.sortBy(node => node._2) + val head = sorted.head + val node = robinArray.toList.sortBy(u => u._2).head._1 + robinArray(node) += 1 + node + } + + private def easyRound(clusterOperator: ClusterClient): NodeAddress = { + val nodeLists = clusterOperator.getAllNodes().toList + if (this.index>=nodeLists.length) { + this.index = 0 + } + val node = nodeLists(this.index) + this.index += 1 + node + + } + + def testRobinRound(): NodeAddress = { + + val list = robinArray.toList + val sorted = list.sortBy(node => node._2) + val head = sorted.head._1 + //val node = robinArray.toList.sortBy(u => u._2).head._1 + robinArray(head) += 1 + head + + } + + private def policyDefault(): NodeAddress = { + val hos = "10.0.86.179" + val por = 7687 + new NodeAddress(hos, por) + } + private def getReadNode(clusterOperator: ClusterClient, strategy: Strategy): NodeAddress = { + strategy match { + case RANDOM_PICK() => policyRandom(clusterOperator) + case DEFAULT_PICK() => policyDefault + case ROBIN_ROUND() => policyRobinRound(clusterOperator) + case EASY_ROUND() => easyRound(clusterOperator) + case _ => policyDefault + + } + } + private def getNode(isWriteStatement: Boolean, clusterOperator: ClusterClient, strategy: Strategy): NodeAddress = { + if (isWriteStatement) getWriteNode(clusterOperator) else getReadNode(clusterOperator, strategy) + } + def setPolicy(strategy: Strategy): Unit = { + this.policy = strategy + } + def getPolicy(): String = { + this.policy match { + case RANDOM_PICK() => "RANDOM_PICK" + case DEFAULT_PICK() => "DEFAULT_PICK" + case ROBIN_ROUND() => "ROBIN_ROUND" + case EASY_ROUND() => "EASY_ROUND" + case _ => "policyDefault-RANDOM_PICK" + } + } + def getDriver(isWriteStatement: Boolean, clusterOperator: ClusterClient): Driver = { + if (this.policy ==null) this.policy = new RANDOM_PICK + getDriver(isWriteStatement, clusterOperator, this.policy) + } + def getDriver(isWriteStatement: Boolean, clusterOperator: ClusterClient, strategy: Strategy): Driver = { + //val node = getNode(isWriteStatement, clusterOperator, new DEFAULT_PICK) + val node = getNode(isWriteStatement, clusterOperator, strategy) + val host = 
node.host + val port = node.port + val uri = s"bolt://$host:$port" + GraphDatabase.driver(uri, AuthTokens.basic("", "")) + } +} diff --git a/src/graiph-driver/scala/org/neo4j/driver/internal/messages.scala b/java-driver/src/main/scala/org/neo4j/driver/internal/messages.scala similarity index 88% rename from src/graiph-driver/scala/org/neo4j/driver/internal/messages.scala rename to java-driver/src/main/scala/org/neo4j/driver/internal/messages.scala index 31d356ea..e5f8127a 100644 --- a/src/graiph-driver/scala/org/neo4j/driver/internal/messages.scala +++ b/java-driver/src/main/scala/org/neo4j/driver/internal/messages.scala @@ -1,8 +1,8 @@ package org.neo4j.driver.internal import java.util.concurrent.CompletableFuture -import cn.graiph.blob.BlobMessageSignature -import cn.graiph.util.Logging +import cn.pandadb.blob.BlobMessageSignature +import cn.pandadb.util.Logging import org.neo4j.driver.Value import org.neo4j.driver.internal.messaging.{Message, MessageEncoder, ValuePacker} import org.neo4j.driver.internal.spi.ResponseHandler @@ -33,7 +33,8 @@ class GetBlobMessageEncoder extends MessageEncoder { } } -class GetBlobMessageHandler(report: CompletableFuture[(BlobChunk, ArrayBuffer[CompletableFuture[BlobChunk]])], exception: CompletableFuture[Throwable]) +class GetBlobMessageHandler(report: CompletableFuture[(BlobChunk, ArrayBuffer[CompletableFuture[BlobChunk]])], + exception: CompletableFuture[Throwable]) extends ResponseHandler with Logging { val _chunks = ArrayBuffer[CompletableFuture[BlobChunk]](); var _completedIndex = -1; @@ -67,12 +68,14 @@ class GetBlobMessageHandler(report: CompletableFuture[(BlobChunk, ArrayBuffer[Co override def onFailure(error: Throwable): Unit = { exception.complete(error); - if (!report.isDone) + if (!report.isDone) { report.complete(null); + } _chunks.foreach { x => - if (!x.isDone) + if (!x.isDone) { x.complete(null) + } } } } \ No newline at end of file diff --git a/src/graiph-driver/scala/org/neo4j/driver/internal/util/BoltClientBlobIO.scala b/java-driver/src/main/scala/org/neo4j/driver/internal/util/BoltClientBlobIO.scala similarity index 95% rename from src/graiph-driver/scala/org/neo4j/driver/internal/util/BoltClientBlobIO.scala rename to java-driver/src/main/scala/org/neo4j/driver/internal/util/BoltClientBlobIO.scala index ac7ba62f..d8a713d8 100644 --- a/src/graiph-driver/scala/org/neo4j/driver/internal/util/BoltClientBlobIO.scala +++ b/java-driver/src/main/scala/org/neo4j/driver/internal/util/BoltClientBlobIO.scala @@ -2,8 +2,8 @@ package org.neo4j.driver.internal.util import java.util -import cn.graiph.blob.{BlobIO, InlineBlob, BlobId, Blob} -import cn.graiph.util.ReflectUtils +import cn.pandadb.blob.{BlobIO, InlineBlob, BlobId, Blob} +import cn.pandadb.util.ReflectUtils import ReflectUtils._ import org.neo4j.driver.Value import org.neo4j.driver.internal.spi.Connection diff --git a/src/graiph-driver/scala/org/neo4j/driver/internal/value/InternalBlobValue.scala b/java-driver/src/main/scala/org/neo4j/driver/internal/value/InternalBlobValue.scala similarity index 92% rename from src/graiph-driver/scala/org/neo4j/driver/internal/value/InternalBlobValue.scala rename to java-driver/src/main/scala/org/neo4j/driver/internal/value/InternalBlobValue.scala index 178985f1..0bdec0c5 100644 --- a/src/graiph-driver/scala/org/neo4j/driver/internal/value/InternalBlobValue.scala +++ b/java-driver/src/main/scala/org/neo4j/driver/internal/value/InternalBlobValue.scala @@ -3,7 +3,8 @@ package org.neo4j.driver.internal.value import java.io.{ByteArrayInputStream, IOException, 
InputStream} import java.util.concurrent.CompletableFuture -import cn.graiph.blob.{MimeType, Blob, InputStreamSource} +import cn.pandadb.blob.{MimeType, Blob, InputStreamSource} +import cn.pandadb.util.PandaException import org.neo4j.driver.internal._ import org.neo4j.driver.internal.spi.Connection import org.neo4j.driver.internal.types.{TypeConstructor, TypeRepresentation} @@ -27,7 +28,7 @@ class InternalBlobValue(val blob: Blob) extends ValueAdapter { override def asBlob: Blob = blob; - override def asObject = blob; + override def asObject: AnyRef = blob; override def toString: String = s"BoltBlobValue(blob=${blob.toString})" } @@ -108,6 +109,6 @@ class BlobInputStream(firstChunk: BlobChunk, chunkFutures: ArrayBuffer[Completab } } -class FailedToReadStreamException(cause: Throwable) extends RuntimeException(cause) { +class FailedToReadStreamException(cause: Throwable) extends PandaException(s"failed to read stream", cause) { } \ No newline at end of file diff --git a/neo4j-hacking/pom.xml b/neo4j-hacking/pom.xml new file mode 100644 index 00000000..2416dc72 --- /dev/null +++ b/neo4j-hacking/pom.xml @@ -0,0 +1,53 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + neo4j-hacking + plugins, configuration... + + + + com.google.code.findbugs + jsr305 + + + cn.pandadb + commons + ${pandadb.version} + + + org.neo4j + neo4j + + + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + scala-compile-first + process-resources + + add-source + compile + + + + + + + + \ No newline at end of file diff --git a/src/blob/java/org/neo4j/graphdb/facade/GraphDatabaseFacadeFactory.java b/neo4j-hacking/src/main/java/org/neo4j/graphdb/facade/GraphDatabaseFacadeFactory.java similarity index 93% rename from src/blob/java/org/neo4j/graphdb/facade/GraphDatabaseFacadeFactory.java rename to neo4j-hacking/src/main/java/org/neo4j/graphdb/facade/GraphDatabaseFacadeFactory.java index d4db144b..ebe56135 100644 --- a/src/blob/java/org/neo4j/graphdb/facade/GraphDatabaseFacadeFactory.java +++ b/neo4j-hacking/src/main/java/org/neo4j/graphdb/facade/GraphDatabaseFacadeFactory.java @@ -19,10 +19,8 @@ */ package org.neo4j.graphdb.facade; -import java.io.File; -import java.util.Map; -import java.util.function.Function; - +import cn.pandadb.context.GraphDatabaseStartedEvent; +import cn.pandadb.util.PandaEventHub; import org.neo4j.bolt.BoltServer; import org.neo4j.dbms.database.DatabaseManager; import org.neo4j.graphdb.DependencyResolver; @@ -30,7 +28,6 @@ import org.neo4j.graphdb.Path; import org.neo4j.graphdb.Relationship; import org.neo4j.graphdb.factory.GraphDatabaseSettings; -import org.neo4j.internal.DataCollectorManager; import org.neo4j.graphdb.factory.module.DataSourceModule; import org.neo4j.graphdb.factory.module.PlatformModule; import org.neo4j.graphdb.factory.module.edition.AbstractEditionModule; @@ -38,6 +35,7 @@ import org.neo4j.graphdb.spatial.Geometry; import org.neo4j.graphdb.spatial.Point; import org.neo4j.helpers.collection.Pair; +import org.neo4j.internal.DataCollectorManager; import org.neo4j.internal.kernel.api.exceptions.KernelException; import org.neo4j.internal.kernel.api.security.SecurityContext; import org.neo4j.kernel.api.KernelTransaction; @@ -48,7 +46,6 @@ import org.neo4j.kernel.configuration.Config; import org.neo4j.kernel.extension.KernelExtensionFactory; import org.neo4j.kernel.impl.api.dbms.NonTransactionalDbmsOperations; -import org.neo4j.kernel.impl.blob.BlobPropertyStoreService; import org.neo4j.kernel.impl.cache.VmPauseMonitorComponent; import 
org.neo4j.kernel.impl.factory.DatabaseInfo; import org.neo4j.kernel.impl.factory.GraphDatabaseFacade; @@ -68,15 +65,12 @@ import org.neo4j.scheduler.DeferredExecutor; import org.neo4j.scheduler.Group; -import static org.neo4j.internal.kernel.api.procs.Neo4jTypes.NTGeometry; -import static org.neo4j.internal.kernel.api.procs.Neo4jTypes.NTNode; -import static org.neo4j.internal.kernel.api.procs.Neo4jTypes.NTPath; -import static org.neo4j.internal.kernel.api.procs.Neo4jTypes.NTPoint; -import static org.neo4j.internal.kernel.api.procs.Neo4jTypes.NTRelationship; -import static org.neo4j.kernel.api.proc.Context.DATABASE_API; -import static org.neo4j.kernel.api.proc.Context.DEPENDENCY_RESOLVER; -import static org.neo4j.kernel.api.proc.Context.KERNEL_TRANSACTION; -import static org.neo4j.kernel.api.proc.Context.SECURITY_CONTEXT; +import java.io.File; +import java.util.Map; +import java.util.function.Function; + +import static org.neo4j.internal.kernel.api.procs.Neo4jTypes.*; +import static org.neo4j.kernel.api.proc.Context.*; /** * This is the main factory for creating database instances. It delegates creation to three different modules @@ -127,8 +121,8 @@ default AvailabilityGuardInstaller availabilityGuardInstaller() protected final DatabaseInfo databaseInfo; private final Function editionFactory; - public GraphDatabaseFacadeFactory( DatabaseInfo databaseInfo, - Function editionFactory ) + public GraphDatabaseFacadeFactory(DatabaseInfo databaseInfo, + Function editionFactory ) { this.databaseInfo = databaseInfo; this.editionFactory = editionFactory; @@ -186,9 +180,6 @@ public GraphDatabaseFacade initFacade( File storeDir, Config config, final Depen Procedures procedures = setupProcedures( platform, edition, graphDatabaseFacade ); platform.dependencies.satisfyDependency( new NonTransactionalDbmsOperations( procedures ) ); - //blob support - platform.life.add( new BlobPropertyStoreService( procedures, storeDir, config, databaseInfo ) ); - Logger msgLog = platform.logging.getInternalLog( getClass() ).infoLogger(); DatabaseManager databaseManager = edition.createDatabaseManager( graphDatabaseFacade, platform, edition, procedures, msgLog ); platform.life.add( databaseManager ); @@ -253,6 +244,11 @@ public GraphDatabaseFacade initFacade( File storeDir, Config config, final Depen throw error; } + // NOTE: pandadb + //blob support + PandaEventHub.publish(new GraphDatabaseStartedEvent(procedures, storeDir, config, databaseInfo)); + //platform.life.add( new InstanceBoundServiceFactoryRegistryHolder( procedures, storeDir, config, databaseInfo ) ); + // END-NOTE return databaseFacade; } diff --git a/neo4j-hacking/src/main/scala/cn/pandadb/context/GraphDatabaseStartedEvent.scala b/neo4j-hacking/src/main/scala/cn/pandadb/context/GraphDatabaseStartedEvent.scala new file mode 100644 index 00000000..5150fef8 --- /dev/null +++ b/neo4j-hacking/src/main/scala/cn/pandadb/context/GraphDatabaseStartedEvent.scala @@ -0,0 +1,16 @@ +package cn.pandadb.context + +import java.io.File + +import cn.pandadb.util.PandaEvent +import org.neo4j.kernel.configuration.Config +import org.neo4j.kernel.impl.factory.DatabaseInfo +import org.neo4j.kernel.impl.proc.Procedures + +case class GraphDatabaseStartedEvent(proceduresService: Procedures, + storeDir: File, + neo4jConf: Config, + databaseInfo: DatabaseInfo) + extends PandaEvent { + +} diff --git a/network-commons/pom.xml b/network-commons/pom.xml new file mode 100644 index 00000000..02714dc4 --- /dev/null +++ b/network-commons/pom.xml @@ -0,0 +1,58 @@ + + + + parent + cn.pandadb + 
0.0.2 + ../ + + 4.0.0 + + cn.pandadb + network-commons + shared lib for driver/server + + + + cn.pandadb + commons + ${pandadb.version} + + + net.neoremind + kraps-rpc_2.11 + + + + + + + + org.apache.curator + curator-recipes + + + org.neo4j + neo4j + test + + + cn.pandadb + neo4j-hacking + 0.0.2 + test + + + junit + junit + + + + + + + + + \ No newline at end of file diff --git a/network-commons/src/main/resources/log4j.properties b/network-commons/src/main/resources/log4j.properties new file mode 100644 index 00000000..f9f8ea82 --- /dev/null +++ b/network-commons/src/main/resources/log4j.properties @@ -0,0 +1,15 @@ +log4j.rootLogger=DEBUG, stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d{HH:mm:ss:SSS}] %-5p %-20c{1} :: %m%n + +#log4j.logger.org.neo4j.values.storable=DEBUG +log4j.logger.org.neo4j=DEBUG +log4j.logger.org.neo4j.server=DEBUG +log4j.logger.cn=DEBUG +log4j.logger.org=WARN +log4j.logger.io=WARN +log4j.logger.java=WARN +log4j.logger.org.quartz=WARN +log4j.logger.eu.medsea.mimeutil=WARN \ No newline at end of file diff --git a/network-commons/src/main/scala/cn/pandadb/network/SerializeUtil.scala b/network-commons/src/main/scala/cn/pandadb/network/SerializeUtil.scala new file mode 100644 index 00000000..86e24eec --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/SerializeUtil.scala @@ -0,0 +1,26 @@ +package cn.pandadb.network + +import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 11:50 2019/12/4 + * @Modified By: + */ +object BytesTransform { + + def serialize[T](o: T): Array[Byte] = { + val bos = new ByteArrayOutputStream() + val oos = new ObjectOutputStream(bos) + oos.writeObject(o) + oos.close() + bos.toByteArray + } + + def deserialize[T](bytes: Array[Byte]): T = { + val bis = new ByteArrayInputStream(bytes) + val ois = new ObjectInputStream(bis) + ois.readObject.asInstanceOf[T] + } +} diff --git a/network-commons/src/main/scala/cn/pandadb/network/ZKClusterEventListener.scala b/network-commons/src/main/scala/cn/pandadb/network/ZKClusterEventListener.scala new file mode 100644 index 00000000..4430f983 --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/ZKClusterEventListener.scala @@ -0,0 +1,20 @@ +package cn.pandadb.network + +class ZKClusterEventListener() extends ClusterEventListener { + override def onEvent(event: ClusterEvent): Unit = { + event match { + // Not implemented. 
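+      // The cases below enumerate every ClusterEvent defined in events.scala; each is a no-op placeholder for now.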
+ case ClusterStateChanged() => ; + case NodeConnected(nodeAddress) => ; + case NodeDisconnected(nodeAddress) => ; + case ReadRequestAccepted() => ; + case WriteRequestAccepted() => ; + case ReadRequestCompleted() => ; + case WriteRequestCompleted() => ; + case MasterWriteNodeSeleted() => ; + case READY_TO_WRITE() => ; + case WRITE_FINISHED() => ; + + } + } +} diff --git a/network-commons/src/main/scala/cn/pandadb/network/ZKPathConfig.scala b/network-commons/src/main/scala/cn/pandadb/network/ZKPathConfig.scala new file mode 100644 index 00000000..1c624bd2 --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/ZKPathConfig.scala @@ -0,0 +1,36 @@ +package cn.pandadb.network + +import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import org.apache.curator.retry.ExponentialBackoffRetry +import org.apache.zookeeper.{CreateMode, ZooDefs} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created in 20:41 2019/11/26 + * @Modified By: + */ +object ZKPathConfig { + val registryPath = s"/PandaDB-v0.0.2" + val ordinaryNodesPath = registryPath + s"/ordinaryNodes" + val leaderNodePath = registryPath + s"/leaderNode" + val dataVersionPath = registryPath + s"/version" + val freshNodePath = registryPath + s"/freshNode" + + def initZKPath(zkString: String): Unit = { + val _curator = CuratorFrameworkFactory.newClient(zkString, + new ExponentialBackoffRetry(1000, 3)) + _curator.start() + val list = List(registryPath, ordinaryNodesPath, leaderNodePath, dataVersionPath, freshNodePath) + list.foreach(path => { + if (_curator.checkExists().forPath(path) == null) { + _curator.create() + .creatingParentsIfNeeded() + .withMode(CreateMode.PERSISTENT) + .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE) + .forPath(path) + } + }) + _curator.close() + } +} diff --git a/network-commons/src/main/scala/cn/pandadb/network/ZookeeperBasedClusterClient.scala b/network-commons/src/main/scala/cn/pandadb/network/ZookeeperBasedClusterClient.scala new file mode 100644 index 00000000..d617cd1f --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/ZookeeperBasedClusterClient.scala @@ -0,0 +1,130 @@ +package cn.pandadb.network + +import scala.collection.JavaConverters._ +import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode +import org.apache.curator.framework.recipes.cache.{PathChildrenCache, PathChildrenCacheEvent, PathChildrenCacheListener} +import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import org.apache.curator.retry.ExponentialBackoffRetry +import org.apache.zookeeper.data.Stat +import org.apache.zookeeper.{CreateMode, ZooDefs} + +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global + +/** + * @Author: Airzihao + * @Description: + * @Date: Created in 9:06 2019/11/26 + * @Modified By: + */ + +class ZookeeperBasedClusterClient(zkString: String) extends ClusterClient { + + val zkServerAddress = zkString + val curator: CuratorFramework = CuratorFrameworkFactory.newClient(zkServerAddress, + new ExponentialBackoffRetry(1000, 3)); + curator.start() + + private var currentState: ClusterState = _ + var listenerList: List[ZKClusterEventListener] = List[ZKClusterEventListener]() + + private var availableNodes: Set[NodeAddress] = { + val pathArrayList = curator.getChildren.forPath(ZKPathConfig.ordinaryNodesPath).asScala + pathArrayList.map(NodeAddress.fromString(_)).toSet + } + + addCuratorListener() + + override def getWriteMasterNode(): NodeAddress = { + var leaderAddress = 
curator.getChildren().forPath(ZKPathConfig.leaderNodePath) + + while (leaderAddress.isEmpty) { + Thread.sleep(500) + leaderAddress = curator.getChildren().forPath(ZKPathConfig.leaderNodePath) + } + NodeAddress.fromString(leaderAddress.get(0)) + } + + def getWriteMasterNode(inner: String): Option[NodeAddress] = { + val leaderAddress = curator.getChildren().forPath(ZKPathConfig.leaderNodePath) + + if(leaderAddress.isEmpty) { + None + } else { + Some(NodeAddress.fromString(leaderAddress.get(0))) + } + } + + override def getAllNodes(): Iterable[NodeAddress] = { + availableNodes + } + + override def getCurrentState(): ClusterState = { + currentState + } + + // add listener to listenerList, of no use at this period. + override def listen(listener: ClusterEventListener): Unit = { + listenerList = listener.asInstanceOf[ZKClusterEventListener] :: listenerList + } + + override def waitFor(state: ClusterState): Unit = { + } + + def getCurator(): CuratorFramework = { + curator + } + + def getFreshNodeIp(): String = { + curator.getChildren.forPath(ZKPathConfig.freshNodePath).get(0) + } + + def getClusterDataVersion(): Int = { + if (curator.checkExists().forPath(ZKPathConfig.dataVersionPath) == null) { + curator.create() + .creatingParentsIfNeeded() + .withMode(CreateMode.PERSISTENT) + .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE) + .forPath(ZKPathConfig.dataVersionPath) + curator.setData().forPath(ZKPathConfig.dataVersionPath, BytesTransform.serialize(-1)) + BytesTransform.deserialize(curator.getData.forPath(ZKPathConfig.dataVersionPath)) + } else { + val stat = new Stat() + val version = curator.getData.storingStatIn(stat).forPath(ZKPathConfig.dataVersionPath) + // stat.version == 0, means not init + if (stat.getVersion == 0) { + curator.setData().forPath(ZKPathConfig.dataVersionPath, BytesTransform.serialize(-1)) + BytesTransform.deserialize(curator.getData.forPath(ZKPathConfig.dataVersionPath)) + } else { + BytesTransform.deserialize(version) + } + } + } + + def addCuratorListener(): Unit = { + val nodesChildrenCache = new PathChildrenCache(curator, ZKPathConfig.ordinaryNodesPath, true) + nodesChildrenCache.start(StartMode.BUILD_INITIAL_CACHE) + nodesChildrenCache.getListenable().addListener( + new PathChildrenCacheListener { + override def childEvent(curatorFramework: CuratorFramework, pathChildrenCacheEvent: PathChildrenCacheEvent): Unit = { + try { + pathChildrenCacheEvent.getType() match { + + case PathChildrenCacheEvent.Type.CHILD_ADDED => + val nodeAddress = NodeAddress.fromString(pathChildrenCacheEvent.getData.getPath.split(s"/").last) + availableNodes += nodeAddress + for (listener <- listenerList) listener.onEvent(NodeConnected(nodeAddress)); + case PathChildrenCacheEvent.Type.CHILD_REMOVED => + val nodeAddress = NodeAddress.fromString(pathChildrenCacheEvent.getData.getPath.split(s"/").last) + availableNodes -= nodeAddress + for (listener <- listenerList) listener.onEvent(NodeDisconnected(NodeAddress.fromString(pathChildrenCacheEvent.getData.getPath))); + // What to do if a node's data is updated? 
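+            // (unanswered for now; one option would be to re-read the child's data and surface it as a dedicated ClusterEvent)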
+ case PathChildrenCacheEvent.Type.CHILD_UPDATED => ; + case _ => ; + } + } catch { case ex: Exception => ex.printStackTrace() } + } + }) + } + +} diff --git a/network-commons/src/main/scala/cn/pandadb/network/events.scala b/network-commons/src/main/scala/cn/pandadb/network/events.scala new file mode 100644 index 00000000..e28f853a --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/events.scala @@ -0,0 +1,45 @@ +package cn.pandadb.network + +trait ClusterEvent { + +} + +case class ClusterStateChanged() extends ClusterEvent { + +} + +case class NodeConnected(nodeAddress: NodeAddress) extends ClusterEvent { + +} + +case class NodeDisconnected(nodeAddress: NodeAddress) extends ClusterEvent { + +} + +case class ReadRequestAccepted() extends ClusterEvent { + +} + +case class WriteRequestAccepted() extends ClusterEvent { + +} + +case class ReadRequestCompleted() extends ClusterEvent { + +} + +case class WriteRequestCompleted() extends ClusterEvent { + +} + +case class MasterWriteNodeSeleted() extends ClusterEvent { + +} + +case class READY_TO_WRITE() extends ClusterEvent { + +} + +case class WRITE_FINISHED() extends ClusterEvent { + +} \ No newline at end of file diff --git a/network-commons/src/main/scala/cn/pandadb/network/internal/client/InternalRpcClient.scala b/network-commons/src/main/scala/cn/pandadb/network/internal/client/InternalRpcClient.scala new file mode 100644 index 00000000..cacc0327 --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/internal/client/InternalRpcClient.scala @@ -0,0 +1,31 @@ +package cn.pandadb.network.internal.client + +import cn.pandadb.network.internal.message.InternalRpcRequest +import cn.pandadb.util.Logging +import net.neoremind.kraps.RpcConf +import net.neoremind.kraps.rpc.netty.NettyRpcEnvFactory +import net.neoremind.kraps.rpc.{RpcAddress, RpcEnv, RpcEnvClientConfig} + +import scala.concurrent.Await +import scala.concurrent.duration.Duration + +/** + * Created by bluejoe on 2019/11/25. + */ +class InternalRpcClient(rpcEnv: RpcEnv, host: String, port: Int) extends Logging { + val endPointRef = { + val rpcConf = new RpcConf() + val config = RpcEnvClientConfig(rpcConf, "pandadb-internal-client") + val rpcEnv: RpcEnv = NettyRpcEnvFactory.create(config) + + rpcEnv.setupEndpointRef(RpcAddress(host, port), "pandadb-internal-client-service") + } + + def close(): Unit = { + rpcEnv.stop(endPointRef) + } + + def request[T >: InternalRpcRequest](message: Any): T = { + Await.result(endPointRef.ask(message), Duration.Inf); + } +} diff --git a/network-commons/src/main/scala/cn/pandadb/network/internal/message/InternalRpcMessage.scala b/network-commons/src/main/scala/cn/pandadb/network/internal/message/InternalRpcMessage.scala new file mode 100644 index 00000000..1e26f666 --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/internal/message/InternalRpcMessage.scala @@ -0,0 +1,20 @@ +package cn.pandadb.network.internal.message + +/** + * Created by bluejoe on 2019/11/25. 
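+ * Marker traits for messages exchanged over the internal RPC channel; concrete pairs such as AuthenticationRequest/AuthenticationResponse below extend them.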
+ */ +trait InternalRpcRequest { + +} + +trait InternalRpcResponse { + +} + +case class AuthenticationRequest() extends InternalRpcRequest{ + +} + +case class AuthenticationResponse() extends InternalRpcResponse{ + +} \ No newline at end of file diff --git a/network-commons/src/main/scala/cn/pandadb/network/net.scala b/network-commons/src/main/scala/cn/pandadb/network/net.scala new file mode 100644 index 00000000..e324c08b --- /dev/null +++ b/network-commons/src/main/scala/cn/pandadb/network/net.scala @@ -0,0 +1,59 @@ +package cn.pandadb.network + +/** + * Created by bluejoe on 2019/11/21. + */ +case class NodeAddress(host: String, port: Int) { + def getAsString: String = { + host + ":" + port.toString + } +} + +object NodeAddress { + def fromString(url: String, separator: String = ":"): NodeAddress = { + val pair = url.split(separator) + NodeAddress(pair(0), pair(1).toInt) + } +} + +// used by server & driver +trait ClusterClient { + + def getWriteMasterNode(): NodeAddress; + + def getAllNodes(): Iterable[NodeAddress]; + + def getCurrentState(): ClusterState; + + def waitFor(state: ClusterState): Unit; + + def listen(listener: ClusterEventListener): Unit; +} + +trait ClusterEventListener { + def onEvent(event: ClusterEvent) +} + +trait ClusterState { + +} + +case class LockedServing() extends ClusterState{ + +} + +case class UnlockedServing() extends ClusterState{ + +} + +case class PreWrite() extends ClusterState{ + +} + +case class Writing() extends ClusterState{ + +} + +case class Finished() extends ClusterState{ + +} \ No newline at end of file diff --git a/network-commons/src/test/resources/test_pnode0.conf b/network-commons/src/test/resources/test_pnode0.conf new file mode 100644 index 00000000..0ae98a18 --- /dev/null +++ b/network-commons/src/test/resources/test_pnode0.conf @@ -0,0 +1,37 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7685 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=localhost:7469 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ + +# fake address just for test, supposed to be the real ip:port of localhost:boltPort +#serviceAddress=10.0.88.11:1111 +# zookeeper revelant args +zkServerAddress=10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181 +sessionTimeout=20000 +connectionTimeout=10000 +registryPath=/pandaNodes +ordinaryNodesPath=/pandaNodes/ordinaryNodes +leaderNodePath=/pandaNodes/leaderNode +localNodeAddress=10.0.88.11:1111 \ No newline at end of file diff --git a/network-commons/src/test/scala/ZKConstantsTest.scala b/network-commons/src/test/scala/ZKConstantsTest.scala new file mode 100644 index 00000000..4334fc03 --- /dev/null +++ b/network-commons/src/test/scala/ZKConstantsTest.scala @@ -0,0 +1,18 @@ +import cn.pandadb.network.ZKPathConfig +import org.junit.{Assert, Test} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created in 11:07 2019/11/26 + * @Modified By: + */ +class ZKConstantsTest { + + @Test + def 
testZKPathConfig(): Unit = { + Assert.assertEquals(s"/PandaDB-v0.0.2", ZKPathConfig.registryPath) + Assert.assertEquals(s"/PandaDB-v0.0.2/leaderNode", ZKPathConfig.leaderNodePath) + Assert.assertEquals(s"/PandaDB-v0.0.2/ordinaryNodes", ZKPathConfig.ordinaryNodesPath) + } +} diff --git a/network-commons/src/test/scala/ZKDiscoveryTest.scala b/network-commons/src/test/scala/ZKDiscoveryTest.scala new file mode 100644 index 00000000..278fb1df --- /dev/null +++ b/network-commons/src/test/scala/ZKDiscoveryTest.scala @@ -0,0 +1,103 @@ +//import cn.pandadb.network._ +//import cn.pandadb.server.ZKServiceRegistry +//import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode +//import org.apache.curator.framework.recipes.cache.{PathChildrenCache, PathChildrenCacheEvent, PathChildrenCacheListener} +//import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +//import org.apache.curator.retry.ExponentialBackoffRetry +//import org.junit.runners.MethodSorters +//import org.junit.{Assert, FixMethodOrder, Test} +// +//import ZKDiscoveryTest.{listenerList, localNodeAddress, ordinadyNodeRegistry} +///** +// * @Author: Airzihao +// * @Description: +// * @Date: Created in 17:02 2019/11/26 +// * @Modified By: +// */ +// +//class FakeListener(listenerId: Int) { +// val id = listenerId +// var CHILD_ADDED = 0 +// var CHILD_REMOVED = 0 +// var path = s""; +//} +//object ZKDiscoveryTest { +// val zkServerAddress = "10.0.86.26:2181"; +// val localNodeAddress = "10.0.88.11:1111" +// val curator: CuratorFramework = CuratorFrameworkFactory.newClient(zkServerAddress, +// new ExponentialBackoffRetry(1000, 3)); +// curator.start() +// +// val listenerList: List[FakeListener] = List(new FakeListener(1), new FakeListener(2)) +// val ordinadyNodeRegistry = new ZKServiceRegistry(zkServerAddress) +// +// val initListenerList = _addListener(curator, listenerList) +// +// private def _addListener(curator: CuratorFramework, listenerList: List[FakeListener]) { +// +// val nodesChildrenCache = new PathChildrenCache(curator, ZKPathConfig.ordinaryNodesPath, false) +// +// //caution: use sync method. POST_INITIAL_EVENT is an async method. +// nodesChildrenCache.start(StartMode.BUILD_INITIAL_CACHE) +// +// val listener = new PathChildrenCacheListener { +// override def childEvent(curatorFramework: CuratorFramework, pathChildrenCacheEvent: PathChildrenCacheEvent): Unit = { +// try { +// pathChildrenCacheEvent.getType() match { +// case PathChildrenCacheEvent.Type.CHILD_ADDED => +// for (listener <- listenerList) { +// listener.CHILD_ADDED = 1; +// // if not splitted, returned: /pandaNodes/ordinaryNodes.10.0.88.11:1111 +// listener.path = pathChildrenCacheEvent.getData.getPath.split(s"/").last +// } +// +// case PathChildrenCacheEvent.Type.CHILD_REMOVED => +// for (listener <- listenerList) { +// listener.CHILD_REMOVED = 1; +// listener.path = pathChildrenCacheEvent.getData.getPath +// } +// // What to do if a node's data is updated? 
+// case PathChildrenCacheEvent.Type.CHILD_UPDATED => ; +// case _ => ; +// } +// } catch { case ex: Exception => ex.printStackTrace() } +// } +// } +// nodesChildrenCache.getListenable().addListener(listener) +// } +//} +// +//@FixMethodOrder(MethodSorters.NAME_ASCENDING) +//class ZKDiscoveryTest { +// +// @Test +// def test0(): Unit = { +// for (listener <- listenerList) { +// Assert.assertEquals(0, listener.CHILD_ADDED) +// Assert.assertEquals(0, listener.CHILD_REMOVED) +// Assert.assertEquals("", listener.path) +// } +// } +// +// @Test +// def test1(): Unit = { +// ordinadyNodeRegistry.registerAsOrdinaryNode(NodeAddress.fromString(localNodeAddress)) +// Thread.sleep(1000) +// for (listener <- listenerList) { +// Assert.assertEquals(1, listener.CHILD_ADDED) +// Assert.assertEquals(0, listener.CHILD_REMOVED) +// Assert.assertEquals("10.0.88.11:1111", listener.path) +// } +// } +// +// @Test +// def test2(): Unit = { +// ordinadyNodeRegistry.unRegisterOrdinaryNode(NodeAddress.fromString(localNodeAddress)) +// Thread.sleep(1000) +// +// for (listener <- listenerList) { +// Assert.assertEquals(1, listener.CHILD_ADDED) +// Assert.assertEquals(1, listener.CHILD_REMOVED) +// } +// } +//} \ No newline at end of file diff --git a/network-commons/src/test/scala/ZookeeperBasedClusterClientTest.scala b/network-commons/src/test/scala/ZookeeperBasedClusterClientTest.scala new file mode 100644 index 00000000..c30f05ca --- /dev/null +++ b/network-commons/src/test/scala/ZookeeperBasedClusterClientTest.scala @@ -0,0 +1,53 @@ +//import cn.pandadb.network.{NodeAddress, ZookeeperBasedClusterClient} +//import cn.pandadb.server.ZKServiceRegistry +//import org.junit.runners.MethodSorters +//import org.junit.{Assert, FixMethodOrder, Test} +// +///** +// * @Author: Airzihao +// * @Description: add some cases to fully test the func. 
+// * @Date: Created at 10:32 2019/11/27 +// * @Modified By: +// */ +// +//@FixMethodOrder(MethodSorters.NAME_ASCENDING) +//class ZookeeperBasedClusterClientTest { +// +// val zkString = "10.0.86.26:2181" +// val localNodeAddress = "10.0.88.11:1111" +// +// val clusterClient = new ZookeeperBasedClusterClient(zkString) +// val register = new ZKServiceRegistry(zkString) +// +// // empty at first +// @Test +// def test1(): Unit = { +// Assert.assertEquals(true, clusterClient.getAllNodes().isEmpty) +// } +// +// // getAllNodes, will get test node +// @Test +// def test2(): Unit = { +// register.registerAsOrdinaryNode(NodeAddress.fromString(localNodeAddress)) +// Thread.sleep(1000) +// Assert.assertEquals(false, clusterClient.getAllNodes().isEmpty) +// Assert.assertEquals(NodeAddress.fromString("10.0.88.11:1111"), clusterClient.getAllNodes().iterator.next()) +// } +// +// // empty after test node unRegister itself +// @Test +// def test3(): Unit = { +// register.unRegisterOrdinaryNode(NodeAddress.fromString(localNodeAddress)) +// Thread.sleep(1000) +// Assert.assertEquals(true, clusterClient.getAllNodes().isEmpty) +// } +// +// // test leader +// @Test +// def test4(): Unit = { +// register.registerAsLeader(NodeAddress.fromString(localNodeAddress)) +// Thread.sleep(1000) +// Assert.assertEquals(NodeAddress.fromString("10.0.88.11:1111"), clusterClient.getWriteMasterNode("").get) +// } +// +//} diff --git a/packaging/pom.xml b/packaging/pom.xml new file mode 100644 index 00000000..e7048a56 --- /dev/null +++ b/packaging/pom.xml @@ -0,0 +1,90 @@ + + + parent + cn.pandadb + 0.0.2 + + 4.0.0 + cn.pandadb + packaging-build + + + + + + + + cn.pandadb + tools + ${project.version} + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.1.0 + + false + pandadb-${project.version} + true + ${project.build.directory} + + + #{*} + + + + + jar-with-dependency + package + + single + + + + jar-with-dependencies + + ${project.build.directory}/lib + + + + community-unix-dist + package + + single + + + + src/main/assemblies/community-unix-dist.xml + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/packaging/src/main/assemblies/community-unix-dist.xml b/packaging/src/main/assemblies/community-unix-dist.xml new file mode 100644 index 00000000..8d8c6f07 --- /dev/null +++ b/packaging/src/main/assemblies/community-unix-dist.xml @@ -0,0 +1,40 @@ + + + + unix + + tar.gz + + + + + + + src/main/distribution/text/community + + keep + true + 0755 + 0644 + + **/.keep + + + + + ${project.build.directory}/lib + lib + true + 0755 + 0755 + + *.jar + + + + + + + diff --git a/packaging/src/main/distribution/text/community/bin/.keep b/packaging/src/main/distribution/text/community/bin/.keep new file mode 100644 index 00000000..e69de29b diff --git a/packaging/src/main/distribution/text/community/bin/pandadb.sh b/packaging/src/main/distribution/text/community/bin/pandadb.sh new file mode 100644 index 00000000..588c6ac6 --- /dev/null +++ b/packaging/src/main/distribution/text/community/bin/pandadb.sh @@ -0,0 +1,586 @@ +#!/usr/bin/env bash + +# Callers may provide the following environment variables to customize this script: +# * JAVA_HOME +# * JAVA_CMD +# * PANDADB_HOME +# * PANDADB_CONF +# * PANDADB_START_WAIT + + +set -o errexit -o nounset -o pipefail +[[ "${TRACE:-}" ]] && set -o xtrace + +declare -r PROGRAM="$(basename "$0")" + +# Sets up the standard environment for running PandaDB shell scripts. 
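+# Settings come from ${PANDADB_CONF}/pandadb.conf; e.g. a line dbms.memory.heap.initial_size=512m there surfaces here as dbms_memory_heap_initial_size=512m.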
+# +# Provides these environment variables: +# PANDADB_HOME +# PANDADB_CONF +# PANDADB_DATA +# PANDADB_LIB +# PANDADB_LOGS +# PANDADB_PIDFILE +# PANDADB_PLUGINS +# one per config setting, with dots converted to underscores +# +setup_environment() { + _setup_calculated_paths + _read_config + _setup_configurable_paths +} + +setup_heap() { + if [[ -n "${HEAP_SIZE:-}" ]]; then + JAVA_MEMORY_OPTS_XMS="-Xms${HEAP_SIZE}" + JAVA_MEMORY_OPTS_XMX="-Xmx${HEAP_SIZE}" + fi +} + +build_classpath() { + CLASSPATH="${PANDADB_PLUGINS}:${PANDADB_CONF}:${PANDADB_LIB}/*:${PANDADB_PLUGINS}/*" + + # augment with tools.jar, will need JDK + if [ "${JAVA_HOME:-}" ]; then + JAVA_TOOLS="${JAVA_HOME}/lib/tools.jar" + if [[ -e $JAVA_TOOLS ]]; then + CLASSPATH="${CLASSPATH}:${JAVA_TOOLS}" + fi + fi +} + +detect_os() { + if uname -s | grep -q Darwin; then + DIST_OS="macosx" + elif [[ -e /etc/gentoo-release ]]; then + DIST_OS="gentoo" + else + DIST_OS="other" + fi +} + +setup_memory_opts() { + # In some cases the heap size may have already been set before we get here, from e.g. HEAP_SIZE env.variable, if so then skip + if [[ -n "${dbms_memory_heap_initial_size:-}" && -z "${JAVA_MEMORY_OPTS_XMS-}" ]]; then + local mem="${dbms_memory_heap_initial_size}" + if ! [[ ${mem} =~ .*[gGmMkK] ]]; then + mem="${mem}m" + cat >&2 <&2 <&1 | awk -F '"' '/version/ {print $2}') + if [[ $JAVA_VERSION = "1."* ]]; then + if [[ "${JAVA_VERSION}" < "1.8" ]]; then + echo "ERROR! PandaDB cannot be started using java version ${JAVA_VERSION}. " + _show_java_help + exit 1 + fi + if ! ("${version_command[@]}" 2>&1 | egrep -q "(Java HotSpot\\(TM\\)|OpenJDK|IBM) (64-Bit Server|Server|Client|J9) VM"); then + unsupported_runtime_warning + fi + elif [[ $JAVA_VERSION = "11"* ]]; then + if ! ("${version_command[@]}" 2>&1 | egrep -q "(Java HotSpot\\(TM\\)|OpenJDK|IBM) (64-Bit Server|Server|Client|J9) VM"); then + unsupported_runtime_warning + fi + else + unsupported_runtime_warning + fi +} + +unsupported_runtime_warning() { + echo "WARNING! You are using an unsupported Java runtime. " + _show_java_help +} + +# Resolve a path relative to $PANDADB_HOME. Don't resolve if +# the path is absolute. +resolve_path() { + orig_filename=$1 + if [[ ${orig_filename} == /* ]]; then + filename="${orig_filename}" + else + filename="${PANDADB_HOME}/${orig_filename}" + fi + echo "${filename}" +} + +call_main_class() { + setup_environment + check_java + build_classpath + EXTRA_JVM_ARGUMENTS="-Dfile.encoding=UTF-8" + class_name=$1 + shift + + export PANDADB_HOME PANDADB_CONF + + exec "${JAVA_CMD}" ${JAVA_OPTS:-} ${JAVA_MEMORY_OPTS_XMS-} ${JAVA_MEMORY_OPTS_XMX-} \ + -classpath "${CLASSPATH}" \ + ${EXTRA_JVM_ARGUMENTS:-} \ + $class_name "$@" +} + +_find_java_cmd() { + [[ "${JAVA_CMD:-}" ]] && return + detect_os + _find_java_home + + if [[ "${JAVA_HOME:-}" ]] ; then + JAVA_CMD="${JAVA_HOME}/bin/java" + if [[ ! -f "${JAVA_CMD}" ]]; then + echo "ERROR: JAVA_HOME is incorrectly defined as ${JAVA_HOME} (the executable ${JAVA_CMD} does not exist)" + exit 1 + fi + else + if [ "${DIST_OS}" != "macosx" ] ; then + # Don't use default java on Darwin because it displays a misleading dialog box + JAVA_CMD="$(which java || true)" + fi + fi + + if [[ ! "${JAVA_CMD:-}" ]]; then + echo "ERROR: Unable to find Java executable." 
+ _show_java_help + exit 1 + fi +} + +_find_java_home() { + [[ "${JAVA_HOME:-}" ]] && return + + case "${DIST_OS}" in + "macosx") + JAVA_HOME="$(/usr/libexec/java_home -v 1.8)" + ;; + "gentoo") + JAVA_HOME="$(java-config --jre-home)" + ;; + esac +} + +_show_java_help() { + echo "* Please use Oracle(R) Java(TM) 8, OpenJDK(TM) or IBM J9 to run PandaDB." +} + +_setup_calculated_paths() { + if [[ -z "${PANDADB_HOME:-}" ]]; then + PANDADB_HOME="$(cd "$(dirname "$0")"/.. && pwd)" + fi + : "${PANDADB_CONF:="${PANDADB_HOME}/conf"}" + readonly PANDADB_HOME PANDADB_CONF +} + +_read_config() { + # - plain key-value pairs become environment variables + # - keys have '.' chars changed to '_' + # - keys of the form KEY.# (where # is a number) are concatenated into a single environment variable named KEY + parse_line() { + line="$1" + if [[ "${line}" =~ ^([^#\s][^=]+)=(.+)$ ]]; then + key="${BASH_REMATCH[1]//./_}" + value="${BASH_REMATCH[2]}" + if [[ "${key}" =~ ^(.*)_([0-9]+)$ ]]; then + key="${BASH_REMATCH[1]}" + fi + # Ignore keys that start with a number because export ${key}= will fail - it is not valid for a bash env var to start with a digit + if [[ ! "${key}" =~ ^[0-9]+.*$ ]]; then + if [[ "${!key:-}" ]]; then + export ${key}="${!key} ${value}" + else + export ${key}="${value}" + fi + else + echo >&2 "WARNING: Ignoring key ${key}, environment variables cannot start with a number." + fi + fi + } + + for file in "pandadb.conf"; do + path="${PANDADB_CONF}/${file}" + if [ -e "${path}" ]; then + while read line; do + parse_line "${line}" + done <"${path}" + fi + done +} + +_setup_configurable_paths() { + PANDADB_DATA=$(resolve_path "${dbms_directories_data:-data}") + PANDADB_LIB=$(resolve_path "${dbms_directories_lib:-lib}") + PANDADB_LOGS=$(resolve_path "${dbms_directories_logs:-logs}") + PANDADB_PLUGINS=$(resolve_path "${dbms_directories_plugins:-plugins}") + PANDADB_RUN=$(resolve_path "${dbms_directories_run:-run}") + PANDADB_CERTS=$(resolve_path "${dbms_directories_certificates:-certificates}") + + if [ -z "${dbms_directories_import:-}" ]; then + PANDADB_IMPORT="NOT SET" + else + PANDADB_IMPORT=$(resolve_path "${dbms_directories_import:-}") + fi + + readonly PANDADB_DATA PANDADB_LIB PANDADB_LOGS PANDADB_PLUGINS PANDADB_RUN PANDADB_IMPORT PANDADB_CERTS +} + +print_configurable_paths() { + cat </dev/null || unset PANDADB_PID + fi +} + +check_limits() { + detect_os + if [ "${DIST_OS}" != "macosx" ] ; then + ALLOWED_OPEN_FILES="$(ulimit -n)" + + if [ "${ALLOWED_OPEN_FILES}" -lt "${MIN_ALLOWED_OPEN_FILES}" ]; then + echo "WARNING: Max ${ALLOWED_OPEN_FILES} open files allowed, minimum of ${MIN_ALLOWED_OPEN_FILES} recommended. See the Neo4j manual." 
+ fi + fi +} + +setup_java_opts() { + JAVA_OPTS=("-server" ${JAVA_MEMORY_OPTS_XMS-} ${JAVA_MEMORY_OPTS_XMX-}) + + if [[ "${dbms_logs_gc_enabled:-}" = "true" ]]; then + if [[ "${JAVA_VERSION}" = "1.8"* ]]; then + # JAVA 8 GC logging setup + JAVA_OPTS+=("-Xloggc:${PANDADB_LOGS}/gc.log" \ + "-XX:+UseGCLogFileRotation" \ + "-XX:NumberOfGCLogFiles=${dbms_logs_gc_rotation_keep_number:-5}" \ + "-XX:GCLogFileSize=${dbms_logs_gc_rotation_size:-20m}") + if [[ -n "${dbms_logs_gc_options:-}" ]]; then + JAVA_OPTS+=(${dbms_logs_gc_options}) # unquoted to split on spaces + else + JAVA_OPTS+=("-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" "-XX:+PrintGCApplicationStoppedTime" \ + "-XX:+PrintPromotionFailure" "-XX:+PrintTenuringDistribution") + fi + else + # JAVA 9 and newer GC logging setup + local gc_options + if [[ -n "${dbms_logs_gc_options:-}" ]]; then + gc_options="${dbms_logs_gc_options}" + else + gc_options="-Xlog:gc*,safepoint,age*=trace" + fi + gc_options+=":file=${PANDADB_LOGS}/gc.log::filecount=${dbms_logs_gc_rotation_keep_number:-5},filesize=${dbms_logs_gc_rotation_size:-20m}" + JAVA_OPTS+=(${gc_options}) + fi + fi + + if [[ -n "${dbms_jvm_additional:-}" ]]; then + JAVA_OPTS+=(${dbms_jvm_additional}) # unquoted to split on spaces + fi +} + +assemble_command_line() { + retval=("${JAVA_CMD}" "-cp" "${CLASSPATH}" "${JAVA_OPTS[@]}" "-Dfile.encoding=UTF-8" "${MAIN_CLASS}" \ + "${PANDADB_HOME}/data" "${PANDADB_CONF}/pandadb.conf") +} + +do_console() { + check_status + if [[ "${PANDADB_PID:-}" ]] ; then + echo "PandaDB is already running (pid ${PANDADB_PID})." + exit 1 + fi + + echo "Starting PandaDB." + + check_limits + build_classpath + + assemble_command_line + command_line=("${retval[@]}") + exec "${command_line[@]}" +} + +do_start() { + check_status + if [[ "${PANDADB_PID:-}" ]] ; then + echo "PandaDB is already running (pid ${PANDADB_PID})." + exit 0 + fi + # check dir for pidfile exists + if [[ ! -d $(dirname "${PANDADB_PIDFILE}") ]]; then + mkdir -p $(dirname "${PANDADB_PIDFILE}") + fi + + echo "Starting PandaDB." + + check_limits + build_classpath + + assemble_command_line + command_line=("${retval[@]}") + nohup "${command_line[@]}" >>"${CONSOLE_LOG}" 2>&1 & + echo "$!" >"${PANDADB_PIDFILE}" + + : "${PANDADB_START_WAIT:=5}" + end="$((SECONDS+PANDADB_START_WAIT))" + while true; do + check_status + + if [[ "${PANDADB_PID:-}" ]]; then + break + fi + + if [[ "${SECONDS}" -ge "${end}" ]]; then + echo "Unable to start. See ${CONSOLE_LOG} for details." + rm "${PANDADB_PIDFILE}" + return 1 + fi + + sleep 1 + done + + print_start_message + echo "See ${CONSOLE_LOG} for current status." +} + +do_stop() { + check_status + + if [[ ! "${PANDADB_PID:-}" ]] ; then + echo "PandaDB not running" + [ -e "${PANDADB_PIDFILE}" ] && rm "${PANDADB_PIDFILE}" + return 0 + else + echo -n "Stopping PandaDB." + end="$((SECONDS+SHUTDOWN_TIMEOUT))" + while true; do + check_status + + if [[ ! "${PANDADB_PID:-}" ]]; then + echo " stopped" + [ -e "${PANDADB_PIDFILE}" ] && rm "${PANDADB_PIDFILE}" + return 0 + fi + + kill "${PANDADB_PID}" 2>/dev/null || true + + if [[ "${SECONDS}" -ge "${end}" ]]; then + echo " failed to stop" + echo "PandaDB (pid ${PANDADB_PID}) took more than ${SHUTDOWN_TIMEOUT} seconds to stop." + echo "Please see ${CONSOLE_LOG} for details." + return 1 + fi + + echo -n "." + sleep 1 + done + fi +} + +do_status() { + check_status + if [[ ! 
"${PANDADB_PID:-}" ]] ; then + echo "PandaDB is not running" + exit 3 + else + echo "PandaDB is running at pid ${PANDADB_PID}" + fi +} + +do_version() { + build_classpath + + assemble_command_line + command_line=("${retval[@]}" "--version") + exec "${command_line[@]}" +} + +send_command_to_all_nodes(){ + if [[ "${pandadb_cluster_nodes:-}" ]] ; then + echo "PandaDB cluster nodes: ${pandadb_cluster_nodes}" + nodes=$(echo $pandadb_cluster_nodes|tr "," "\n") + for node in ${nodes[@]}; do + ssh_cmd="ssh ${node} $1" + echo "${ssh_cmd}" + ssh ${node} $1 + done + else + echo "WARNING: pandadb.cluster.nodes is not set in configure file." + fi +} + +setup_java () { + check_java + setup_java_opts + setup_arbiter_options +} + + +main() { + setup_environment + CONSOLE_LOG="${PANDADB_LOGS}/pandadb.log" + PANDADB_PIDFILE="${PANDADB_RUN}/pandadb.pid" + readonly CONSOLE_LOG PANDADB_PIDFILE + + case "${1:-}" in + console) + setup_java + print_active_database + print_configurable_paths + do_console + ;; + + start) + START_NODE_KIND="pnode" + setup_java + print_active_database + print_configurable_paths + do_start + ;; + + start-all-nodes) + send_command_to_all_nodes "source /etc/profile;cd \$PANDADB_HOME;pandadb.sh start;" + ;; + + start-watch-dog) + START_NODE_KIND="watch-dog" + setup_java + print_active_database + print_configurable_paths + do_start + ;; + + stop) + setup_arbiter_options + do_stop + ;; + + stop-all-nodes) + send_command_to_all_nodes "source /etc/profile;cd \$PANDADB_HOME;pandadb.sh stop;" + ;; + + restart) + setup_java + do_stop + do_start + ;; + + status) + do_status + ;; +# +# --version|version) +# setup_java +# do_version +# ;; + + help) + echo "Usage: ${PROGRAM} { console | start | start-watch-dog | stop | restart | start-all-nodes | stop-all-nodes | status | version }" + ;; + + *) + echo >&2 "Usage: ${PROGRAM} { console | start | start-watch-dog | stop | restart | start-all-nodes | stop-all-nodes | status | version }" + exit 1 + ;; + esac +} + +main "$@" diff --git a/packaging/src/main/distribution/text/community/conf/pandadb.conf b/packaging/src/main/distribution/text/community/conf/pandadb.conf new file mode 100644 index 00000000..4e6487c5 --- /dev/null +++ b/packaging/src/main/distribution/text/community/conf/pandadb.conf @@ -0,0 +1,412 @@ +#***************************************************************** +# PandaDB configuration +#***************************************************************** + +# IP or Hostname of cluster nodes +pandadb.cluster.nodes=10.0.82.216,10.0.82.217,10.0.82.218 + + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +#blob.storage.hbase.zookeeper.port=2181 +#blob.storage.hbase.zookeeper.quorum=localhost +#blob.storage.hbase.auto_create_table=true +#blob.storage.hbase.table=PIDB_BLOB + +#blob.storage=org.neo4j.kernel.impl.blob.DefaultLocalFileSystemBlobValueStorage +#blob.storage.file.dir=/tmp + +#blob.aipm.modules.enabled=false +#blob.aipm.modules.dir=/usr/local/aipm/modules/ +aipm.http.host.url=http://127.0.0.1:8081/ + + +# zk config +zookeeper.address=10.0.82.216:2181,10.0.82.217:2181 + +# this node service address +node.server.address=10.0.82.216:7687 + +# node communication rpc config +rpc.port=1224 + + +# external storage config +external.property.storage.enabled=true + +# solr external storage config +# external.properties.store.factory=cn.pandadb.externalprops.InSolrPropertyNodeStoreFactory +# external.properties.store.solr.zk=10.0.82.216:2181,10.0.82.217:2181,10.0.82.218:2181 +# 
external.properties.store.solr.collection=pandaDB + +# ElasticSearch external storage config +external.properties.store.factory=cn.pandadb.externalprops.InElasticSearchPropertyNodeStoreFactory +external.properties.store.es.host=10.0.82.216 +external.properties.store.es.port=9200 +external.properties.store.es.schema=http +external.properties.store.es.scroll.size=1000 +external.properties.store.es.scroll.time.minutes=10 +external.properties.store.es.index=test-0119 +external.properties.store.es.type=nodes + + +#***************************************************************** +# Neo4j configuration +# +# For more details and a complete list of settings, please see +# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/ +#***************************************************************** + +# The name of the database to mount +#dbms.active_database=graph.db + +# Paths of directories in the installation. +#dbms.directories.data=data +#dbms.directories.plugins=plugins +#dbms.directories.certificates=certificates +#dbms.directories.logs=logs +#dbms.directories.lib=lib +#dbms.directories.run=run + +# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to +# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the +# `LOAD CSV` section of the manual for details. +dbms.directories.import=import + +# Whether requests to Neo4j are authenticated. +# To disable authentication, uncomment this line +dbms.security.auth_enabled=false + +# Enable this to be able to upgrade a store from an older version. +#dbms.allow_upgrade=true + +# Java Heap Size: by default the Java heap size is dynamically +# calculated based on available system resources. +# Uncomment these lines to set specific initial and maximum +# heap size. +#dbms.memory.heap.initial_size=512m +#dbms.memory.heap.max_size=512m + +# The amount of memory to use for mapping the store files, in bytes (or +# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g'). +# If Neo4j is running on a dedicated server, then it is generally recommended +# to leave about 2-4 gigabytes for the operating system, give the JVM enough +# heap to hold all your transaction state and query context, and then leave the +# rest for the page cache. +# The default page cache memory assumes the machine is dedicated to running +# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size. +#dbms.memory.pagecache.size=10g + +#***************************************************************** +# Network connector configuration +#***************************************************************** + +# With default configuration Neo4j only accepts local connections. +# To accept non-local connections, uncomment this line: +dbms.connectors.default_listen_address=0.0.0.0 + +# You can also choose a specific network interface, and configure a non-default +# port for each connector, by setting their individual listen_address. + +# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or +# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for +# individual connectors below. +dbms.connectors.default_advertised_address=0.0.0.0 + +# You can also choose a specific advertised hostname or IP address, and +# configure an advertised port for each connector, by setting their +# individual advertised_address. 
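+# For example (hostname below is illustrative):
+#dbms.connector.bolt.advertised_address=pandadb-node1:7687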
+ +# Bolt connector +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=:7685 + +# HTTP Connector. There can be zero or one HTTP connectors. +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=:7474 + +# HTTPS Connector. There can be zero or one HTTPS connectors. +#dbms.connector.https.enabled=true +#dbms.connector.https.listen_address=:7473 + +# Number of Neo4j worker threads. +#dbms.threads.worker_count= + +#***************************************************************** +# SSL system configuration +#***************************************************************** + +# Names of the SSL policies to be used for the respective components. + +# The legacy policy is a special policy which is not defined in +# the policy configuration section, but rather derives from +# dbms.directories.certificates and associated files +# (by default: neo4j.key and neo4j.cert). Its use will be deprecated. + +# The policies to be used for connectors. +# +# N.B: Note that a connector must be configured to support/require +# SSL/TLS for the policy to actually be utilized. +# +# see: dbms.connector.*.tls_level + +#bolt.ssl_policy=legacy +#https.ssl_policy=legacy + +#***************************************************************** +# SSL policy configuration +#***************************************************************** + +# Each policy is configured under a separate namespace, e.g. +# dbms.ssl.policy..* +# +# The example settings below are for a new policy named 'default'. + +# The base directory for cryptographic objects. Each policy will by +# default look for its associated objects (keys, certificates, ...) +# under the base directory. +# +# Every such setting can be overridden using a full path to +# the respective object, but every policy will by default look +# for cryptographic objects in its base location. +# +# Mandatory setting + +#dbms.ssl.policy.default.base_directory=certificates/default + +# Allows the generation of a fresh private key and a self-signed +# certificate if none are found in the expected locations. It is +# recommended to turn this off again after keys have been generated. +# +# Keys should in general be generated and distributed offline +# by a trusted certificate authority (CA) and not by utilizing +# this mode. + +#dbms.ssl.policy.default.allow_key_generation=false + +# Enabling this makes it so that this policy ignores the contents +# of the trusted_dir and simply resorts to trusting everything. +# +# Use of this mode is discouraged. It would offer encryption but no security. + +#dbms.ssl.policy.default.trust_all=false + +# The private key for the default SSL policy. By default a file +# named private.key is expected under the base directory of the policy. +# It is mandatory that a key can be found or generated. + +#dbms.ssl.policy.default.private_key= + +# The private key for the default SSL policy. By default a file +# named public.crt is expected under the base directory of the policy. +# It is mandatory that a certificate can be found or generated. + +#dbms.ssl.policy.default.public_certificate= + +# The certificates of trusted parties. By default a directory named +# 'trusted' is expected under the base directory of the policy. It is +# mandatory to create the directory so that it exists, because it cannot +# be auto-created (for security purposes). +# +# To enforce client authentication client_auth must be set to 'require'! 
+ +#dbms.ssl.policy.default.trusted_dir= + +# Client authentication setting. Values: none, optional, require +# The default is to require client authentication. +# +# Servers are always authenticated unless explicitly overridden +# using the trust_all setting. In a mutual authentication setup this +# should be kept at the default of require and trusted certificates +# must be installed in the trusted_dir. + +#dbms.ssl.policy.default.client_auth=require + +# It is possible to verify the hostname that the client uses +# to connect to the remote server. In order for this to work, the server public +# certificate must have a valid CN and/or matching Subject Alternative Names. + +# Note that this is irrelevant on host side connections (sockets receiving +# connections). + +# To enable hostname verification client side on nodes, set this to true. + +#dbms.ssl.policy.default.verify_hostname=false + +# A comma-separated list of allowed TLS versions. +# By default only TLSv1.2 is allowed. + +#dbms.ssl.policy.default.tls_versions= + +# A comma-separated list of allowed ciphers. +# The default ciphers are the defaults of the JVM platform. + +#dbms.ssl.policy.default.ciphers= + +#***************************************************************** +# Logging configuration +#***************************************************************** + +# To enable HTTP logging, uncomment this line +dbms.logs.http.enabled=true + +# Number of HTTP logs to keep. +#dbms.logs.http.rotation.keep_number=5 + +# Size of each HTTP log that is kept. +#dbms.logs.http.rotation.size=20m + +# To enable GC Logging, uncomment this line +#dbms.logs.gc.enabled=true + +# GC Logging Options +# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information. +#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution + +# For Java 9 and newer GC Logging Options +# see https://docs.oracle.com/javase/10/tools/java.htm#JSWOR-GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5 +#dbms.logs.gc.options=-Xlog:gc*,safepoint,age*=trace + +# Number of GC logs to keep. +#dbms.logs.gc.rotation.keep_number=5 + +# Size of each GC log that is kept. +#dbms.logs.gc.rotation.size=20m + +# Log level for the debug log. One of DEBUG, INFO, WARN and ERROR. Be aware that logging at DEBUG level can be very verbose. +#dbms.logs.debug.level=INFO + +# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k", +# "m" or "g". +#dbms.logs.debug.rotation.size=20m + +# Maximum number of history files for the internal log. +#dbms.logs.debug.rotation.keep_number=7 + +#***************************************************************** +# Miscellaneous configuration +#***************************************************************** + +# Enable this to specify a parser other than the default one. +#cypher.default_language_version=2.3 + +# Determines if Cypher will allow using file URLs when loading data using +# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV` +# clauses that load data from the file system. +#dbms.security.allow_csv_import_from_file_urls=true + + +# Value of the Access-Control-Allow-Origin header sent over any HTTP or HTTPS +# connector. This defaults to '*', which allows broadest compatibility. Note +# that any URI provided here limits HTTP/HTTPS access to that URI only. 
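+# e.g. setting it to https://example.org (illustrative) would restrict browser access to that origin.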
+#dbms.security.http_access_control_allow_origin=* + +# Value of the HTTP Strict-Transport-Security (HSTS) response header. This header +# tells browsers that a webpage should only be accessed using HTTPS instead of HTTP. +# It is attached to every HTTPS response. Setting is not set by default so +# 'Strict-Transport-Security' header is not sent. Value is expected to contain +# directives like 'max-age', 'includeSubDomains' and 'preload'. +#dbms.security.http_strict_transport_security= + +# Retention policy for transaction logs needed to perform recovery and backups. +dbms.tx_log.rotation.retention_policy=1 days + +# Only allow read operations from this Neo4j instance. This mode still requires +# write access to the directory for lock purposes. +#dbms.read_only=false + +# Comma separated list of JAX-RS packages containing JAX-RS resources, one +# package name for each mountpoint. The listed package names will be loaded +# under the mountpoints specified. Uncomment this line to mount the +# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from +# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of +# http://localhost:7474/examples/unmanaged/helloworld/{nodeId} +#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged + +# A comma separated list of procedures and user defined functions that are allowed +# full access to the database through unsupported/insecure internal APIs. +#dbms.security.procedures.unrestricted=my.extensions.example,my.procedures.* + +# A comma separated list of procedures to be loaded by default. +# Leaving this unconfigured will load all procedures found. +#dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.* + +#******************************************************************** +# JVM Parameters +#******************************************************************** + +# G1GC generally strikes a good balance between throughput and tail +# latency, without too much tuning. +dbms.jvm.additional=-XX:+UseG1GC + +# Have common exceptions keep producing stack traces, so they can be +# debugged regardless of how often logs are rotated. +dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow + +# Make sure that `initmemory` is not only allocated, but committed to +# the process, before starting the database. This reduces memory +# fragmentation, increasing the effectiveness of transparent huge +# pages. It also reduces the possibility of seeing performance drop +# due to heap-growing GC events, where a decrease in available page +# cache leads to an increase in mean IO response time. +# Try reducing the heap memory, if this flag degrades performance. +dbms.jvm.additional=-XX:+AlwaysPreTouch + +# Trust that non-static final fields are really final. +# This allows more optimizations and improves overall performance. +# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or +# serialization to change the value of final fields! +dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions +dbms.jvm.additional=-XX:+TrustFinalNonStaticFields + +# Disable explicit garbage collection, which is occasionally invoked by the JDK itself. +dbms.jvm.additional=-XX:+DisableExplicitGC + +# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and +# jmx.password files are required. 
+# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords, +# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'. +# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html +# On Unix based systems the jmx.password file needs to be owned by the user that will run the server, +# and have permissions set to 0600. +# For details on setting these file permissions on Windows see: +# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637 +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password +#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access + +# Some systems cannot discover host name automatically, and need this line configured: +#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME + +# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes. +# This is to protect the server from any potential passive eavesdropping. +dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048 + +# This mitigates a DDoS vector. +dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +# This filter prevents deserialization of arbitrary objects via java object serialization, addressing potential vulnerabilities. +# By default this filter whitelists all neo4j classes, as well as classes from the hazelcast library and the java standard library. +# These defaults should only be modified by expert users! +# For more details (including filter syntax) see: https://openjdk.java.net/jeps/290 +#dbms.jvm.additional=-Djdk.serialFilter=java.**;org.neo4j.**;com.neo4j.**;com.hazelcast.**;net.sf.ehcache.Element;com.sun.proxy.*;org.openjdk.jmh.**;!* + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. +# Please uninstall the service before modifying this section. The +# service can then be reinstalled. 
+ +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=tarball diff --git a/packaging/src/main/distribution/text/community/cypher-plugins.xml b/packaging/src/main/distribution/text/community/cypher-plugins.xml new file mode 100644 index 00000000..d7f018f3 --- /dev/null +++ b/packaging/src/main/distribution/text/community/cypher-plugins.xml @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/packaging/src/main/distribution/text/community/data/databases/.keep b/packaging/src/main/distribution/text/community/data/databases/.keep new file mode 100644 index 00000000..e69de29b diff --git a/packaging/src/main/distribution/text/community/import/.keep b/packaging/src/main/distribution/text/community/import/.keep new file mode 100644 index 00000000..e69de29b diff --git a/packaging/src/main/distribution/text/community/plugins/.keep b/packaging/src/main/distribution/text/community/plugins/.keep new file mode 100644 index 00000000..e69de29b diff --git a/packaging/src/main/distribution/text/community/run/.keep b/packaging/src/main/distribution/text/community/run/.keep new file mode 100644 index 00000000..e69de29b diff --git a/pom.xml b/pom.xml index 5c304e0e..9e774c4b 100644 --- a/pom.xml +++ b/pom.xml @@ -4,112 +4,299 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - cn.graiph - graiphdb-2019 - 1.0-SNAPSHOT + cn.pandadb + parent + pom + 0.0.2 + + java-driver + blob-commons + blob-feature + aipm-library + external-properties + server + commons + neo4j-hacking + network-commons + tools + itest + packaging + UTF-8 + UTF-8 1.8 1.8 2.11.8 2.11 2.11 + 0.0.2 - - org.scala-lang.modules - scala-parser-combinators_2.11 - 1.1.1 + org.scala-lang + scala-library - info.debatty - java-string-similarity - RELEASE + org.scala-lang + scala-compiler - com.google.code.findbugs - jsr305 - 3.0.0 - - - org.reactivestreams - reactive-streams - 1.0.2 - - - io.projectreactor - reactor-core - 3.2.6.RELEASE - - - org.springframework - spring-context - 4.0.0.RELEASE - - - commons-io - commons-io - 2.6 - - - commons-codec - commons-codec - 1.11 - - - eu.medsea.mimeutil - mime-util - 2.1.3 - - - org.apache.httpcomponents - httpclient - 4.5.7 - - - junit - junit - 4.12 - - - org.neo4j - neo4j - 3.5.6 - - - org.neo4j.app - neo4j-server - 3.5.6 - - - - org.apache.solr - solr-solrj - 6.0.0 + org.scala-lang + scala-reflect org.slf4j slf4j-log4j12 - 1.7.25 org.slf4j slf4j-api - 1.7.25 - - - org.scalatest - scalatest_${scala.compat.version} - 3.0.0 - test - - - org.apache.zookeeper - zookeeper - 3.4.14 + + + + + org.scala-lang + scala-library + ${scala.version} + + + org.scala-lang + scala-compiler + ${scala.version} + + + org.scala-lang + scala-reflect + ${scala.version} + + + + org.scala-lang.modules + scala-parser-combinators_2.11 + 1.1.1 + + + info.debatty + java-string-similarity + RELEASE + + + com.google.code.findbugs + jsr305 + 3.0.0 + + + org.reactivestreams + reactive-streams + 1.0.2 + + + io.projectreactor + reactor-core + 3.2.6.RELEASE + + + org.springframework + spring-context + 4.0.0.RELEASE + + + commons-io + commons-io + 2.6 + + + commons-codec + 
commons-codec + 1.11 + + + eu.medsea.mimeutil + mime-util + 2.1.3 + + + net.neoremind + kraps-rpc_2.11 + 1.0.0 + + + + + + + + + + + + + + + + + + + + + + + org.apache.httpcomponents + httpclient + 4.5.7 + + + junit + junit + 4.13.1 + + + org.neo4j + neo4j + 3.5.6 + + + org.neo4j.app + neo4j-server + 3.5.6 + + + org.apache.solr + solr-solrj + 6.0.0 + + + org.slf4j + slf4j-log4j12 + 1.7.25 + + + org.slf4j + slf4j-api + 1.7.25 + + + org.scalatest + scalatest_${scala.compat.version} + 3.0.0 + test + + + org.apache.httpcomponents + httpmime + 4.5.7 + + + org.apache.curator + curator-recipes + 2.10.0 + + + + + + + org.scalastyle + scalastyle-maven-plugin + 1.0.0 + + false + true + true + false + ${basedir}/src/main/scala + ${basedir}/src/test/scala + scalastyle-config.xml + ${basedir}/target/scalastyle-output.xml + ${project.build.sourceEncoding} + ${project.reporting.outputEncoding} + + + + + check + + + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + scala-compile-first + process-resources + + add-source + compile + + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + 3.0.2 + + + + default-jar + + + + true + + + ${project.organization.url} + ${moduleName} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/review/bluejoe_review.xml b/review/bluejoe_review.xml new file mode 100644 index 00000000..f05bca5b --- /dev/null +++ b/review/bluejoe_review.xml @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Solved. + + + + + deleted. + + + + I'm reconstruct this class. + + + + + + + + + + + + + + + + + + + At first, I designed the trait, supposed that there maybe another registry center which is not zk based. + + + + + + + + + + if cannot create a new Client, will reinvoke itself + + + + + + \ No newline at end of file diff --git a/scalastyle-config.xml b/scalastyle-config.xml new file mode 100644 index 00000000..0eb18643 --- /dev/null +++ b/scalastyle-config.xml @@ -0,0 +1,277 @@ + + + + Scalastyle standard configuration + + + + + + + + + + + + + + + + + + + + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ARROW, EQUALS, ELSE, TRY, CATCH, FINALLY, LARROW, RARROW + + + + + + ARROW, EQUALS, COMMA, COLON, IF, ELSE, DO, WHILE, FOR, MATCH, TRY, CATCH, FINALLY, LARROW, RARROW + + + + + + + (\r|)\n(\s*)(\r|)\n(\s*)(\r|)\n + + + + + + + + + ^println$ + + + + + (\.toUpperCase|\.toLowerCase)(?!(\(|\(Locale.ROOT\))) + + + + + throw new \w+Error\( + + + + + throw new RuntimeException\( + + + + + + COMMA + + + + + \)\{ + + + + + (?m)^(\s*)/[*][*].*$(\r|)\n^\1 [*] + Use Javadoc style indentation for multiline comments + + + + case[^\n>]*=>\s*\{ + Omit braces in case clauses. 
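+ <!-- Illustration only, not part of the ruleset: the regex above flags code such as
+      `case n => { handle(n) }`; the preferred, brace-free form is `case n => handle(n)`. -->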
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + org.apache.curator advised + + + + + + + + + + + + + + + + + 10240 + + + + + 300 + + + + + 10 + + + + + 50 + + + + + + + + + + + -1,0,1,2,3 + + + diff --git a/server/pom.xml b/server/pom.xml new file mode 100644 index 00000000..3d7efb09 --- /dev/null +++ b/server/pom.xml @@ -0,0 +1,87 @@ + + + + parent + cn.pandadb + 0.0.2 + ../ + + 4.0.0 + + cn.pandadb + server + + + + cn.pandadb + commons + ${pandadb.version} + compile + + + cn.pandadb + network-commons + ${pandadb.version} + compile + + + cn.pandadb + blob-feature + ${pandadb.version} + compile + + + cn.pandadb + external-properties + ${pandadb.version} + compile + + + cn.pandadb + java-driver + ${pandadb.version} + compile + + + org.neo4j.app + neo4j-server + + + org.scala-lang.modules + scala-parser-combinators_2.11 + + + org.springframework + spring-context + + + + com.google.code.gson + gson + 2.8.5 + + + + + + + net.alchim31.maven + scala-maven-plugin + 3.2.1 + + + scala-compile-first + process-resources + + add-source + compile + + + + + + + + \ No newline at end of file diff --git a/src/externel-properties/java/org/neo4j/bolt/v1/runtime/BoltAuthenticationHelper.java b/server/src/main/java/org/neo4j/bolt/v1/runtime/BoltAuthenticationHelper.java similarity index 52% rename from src/externel-properties/java/org/neo4j/bolt/v1/runtime/BoltAuthenticationHelper.java rename to server/src/main/java/org/neo4j/bolt/v1/runtime/BoltAuthenticationHelper.java index d9b2e538..a3693a2d 100644 --- a/src/externel-properties/java/org/neo4j/bolt/v1/runtime/BoltAuthenticationHelper.java +++ b/server/src/main/java/org/neo4j/bolt/v1/runtime/BoltAuthenticationHelper.java @@ -19,8 +19,9 @@ */ package org.neo4j.bolt.v1.runtime; -import java.util.Map; - +import cn.pandadb.server.internode.PNodeStatementProcessor; +import cn.pandadb.server.watchdog.ForwardedStatementProcessor; +import cn.pandadb.util.GlobalContext; import org.neo4j.bolt.runtime.BoltConnectionFatality; import org.neo4j.bolt.runtime.BoltStateMachineSPI; import org.neo4j.bolt.runtime.StateMachineContext; @@ -28,40 +29,36 @@ import org.neo4j.bolt.security.auth.AuthenticationResult; import org.neo4j.values.storable.Values; -public class BoltAuthenticationHelper -{ - public static boolean IS_DISPATCHER_NODE = true; +import java.util.Map; - public static boolean processAuthentication( String userAgent, Map authToken, StateMachineContext context ) throws BoltConnectionFatality - { - try - { +public class BoltAuthenticationHelper { + + public static boolean processAuthentication(String userAgent, Map authToken, StateMachineContext context) throws BoltConnectionFatality { + try { BoltStateMachineSPI boltSpi = context.boltSpi(); - AuthenticationResult authResult = boltSpi.authenticate( authToken ); + AuthenticationResult authResult = boltSpi.authenticate(authToken); String username = authResult.getLoginContext().subject().username(); - context.authenticatedAsUser( username, userAgent ); - - StatementProcessor statementProcessor = new TransactionStateMachine( boltSpi.transactionSpi(), authResult, context.clock() ); - //NOTE: dispatcher node or gnode? 
- if(IS_DISPATCHER_NODE) { - statementProcessor = new DispatchedStatementProcessor(statementProcessor, null); + context.authenticatedAsUser(username, userAgent); + StatementProcessor statementProcessor = new TransactionStateMachine(boltSpi.transactionSpi(), authResult, context.clock()); + //NOTE: pandadb + //is watch dog + if(GlobalContext.isWatchDog()) { + statementProcessor = new ForwardedStatementProcessor(statementProcessor, boltSpi.transactionSpi()); } - - context.connectionState().setStatementProcessor( statementProcessor ); - - if ( authResult.credentialsExpired() ) - { - context.connectionState().onMetadata( "credentials_expired", Values.TRUE ); + else { + statementProcessor = new PNodeStatementProcessor(statementProcessor, boltSpi.transactionSpi()); } - context.connectionState().onMetadata( "server", Values.stringValue( boltSpi.version() ) ); - boltSpi.udcRegisterClient( userAgent ); - + //NOTE + context.connectionState().setStatementProcessor(statementProcessor); + if (authResult.credentialsExpired()) { + context.connectionState().onMetadata("credentials_expired", Values.TRUE); + } + context.connectionState().onMetadata("server", Values.stringValue(boltSpi.version())); + boltSpi.udcRegisterClient(userAgent); return true; - } - catch ( Throwable t ) - { - context.handleFailure( t, true ); + } catch (Throwable t) { + context.handleFailure(t, true); return false; } } diff --git a/src/graiph-database/resources/browser/23eaba762d31f7c6f4d6.worker.js b/server/src/main/resources/browser/23eaba762d31f7c6f4d6.worker.js similarity index 100% rename from src/graiph-database/resources/browser/23eaba762d31f7c6f4d6.worker.js rename to server/src/main/resources/browser/23eaba762d31f7c6f4d6.worker.js diff --git a/src/graiph-database/resources/browser/app-d69f0f140465c60d7038.js b/server/src/main/resources/browser/app-d69f0f140465c60d7038.js similarity index 100% rename from src/graiph-database/resources/browser/app-d69f0f140465c60d7038.js rename to server/src/main/resources/browser/app-d69f0f140465c60d7038.js diff --git a/src/graiph-database/resources/browser/assets/click-next-f6af67414a67800d96d1163212644fed.png b/server/src/main/resources/browser/assets/click-next-f6af67414a67800d96d1163212644fed.png similarity index 100% rename from src/graiph-database/resources/browser/assets/click-next-f6af67414a67800d96d1163212644fed.png rename to server/src/main/resources/browser/assets/click-next-f6af67414a67800d96d1163212644fed.png diff --git a/src/graiph-database/resources/browser/assets/community-b5f64fe3d7d0a6384ba965cf55fcc319.jpg b/server/src/main/resources/browser/assets/community-b5f64fe3d7d0a6384ba965cf55fcc319.jpg similarity index 100% rename from src/graiph-database/resources/browser/assets/community-b5f64fe3d7d0a6384ba965cf55fcc319.jpg rename to server/src/main/resources/browser/assets/community-b5f64fe3d7d0a6384ba965cf55fcc319.jpg diff --git a/src/graiph-database/resources/browser/assets/customer-orders-6d6e459e9f19ee1031deee909694da2d.png b/server/src/main/resources/browser/assets/customer-orders-6d6e459e9f19ee1031deee909694da2d.png similarity index 100% rename from src/graiph-database/resources/browser/assets/customer-orders-6d6e459e9f19ee1031deee909694da2d.png rename to server/src/main/resources/browser/assets/customer-orders-6d6e459e9f19ee1031deee909694da2d.png diff --git a/src/graiph-database/resources/browser/assets/fonts/Inconsolata-Bold.ttf b/server/src/main/resources/browser/assets/fonts/Inconsolata-Bold.ttf similarity index 100% rename from 
src/graiph-database/resources/browser/assets/fonts/Inconsolata-Bold.ttf rename to server/src/main/resources/browser/assets/fonts/Inconsolata-Bold.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/Inconsolata-Regular.ttf b/server/src/main/resources/browser/assets/fonts/Inconsolata-Regular.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/Inconsolata-Regular.ttf rename to server/src/main/resources/browser/assets/fonts/Inconsolata-Regular.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-Bold.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-Bold.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-Bold.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-Bold.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-BoldItalic.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-BoldItalic.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-BoldItalic.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-BoldItalic.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-ExtraBold.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-ExtraBold.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-ExtraBold.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-ExtraBold.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-ExtraBoldItalic.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-ExtraBoldItalic.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-ExtraBoldItalic.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-ExtraBoldItalic.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-Italic.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-Italic.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-Italic.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-Italic.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-Light.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-Light.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-Light.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-Light.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-LightItalic.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-LightItalic.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-LightItalic.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-LightItalic.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-Regular.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-Regular.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-Regular.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-Regular.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-Semibold.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-Semibold.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-Semibold.ttf rename to 
server/src/main/resources/browser/assets/fonts/OpenSans-Semibold.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/OpenSans-SemiboldItalic.ttf b/server/src/main/resources/browser/assets/fonts/OpenSans-SemiboldItalic.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/OpenSans-SemiboldItalic.ttf rename to server/src/main/resources/browser/assets/fonts/OpenSans-SemiboldItalic.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.eot b/server/src/main/resources/browser/assets/fonts/fontawesome-webfont.eot similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.eot rename to server/src/main/resources/browser/assets/fonts/fontawesome-webfont.eot diff --git a/src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.svg b/server/src/main/resources/browser/assets/fonts/fontawesome-webfont.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.svg rename to server/src/main/resources/browser/assets/fonts/fontawesome-webfont.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.ttf b/server/src/main/resources/browser/assets/fonts/fontawesome-webfont.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.ttf rename to server/src/main/resources/browser/assets/fonts/fontawesome-webfont.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.woff b/server/src/main/resources/browser/assets/fonts/fontawesome-webfont.woff similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.woff rename to server/src/main/resources/browser/assets/fonts/fontawesome-webfont.woff diff --git a/src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.woff2 b/server/src/main/resources/browser/assets/fonts/fontawesome-webfont.woff2 similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/fontawesome-webfont.woff2 rename to server/src/main/resources/browser/assets/fonts/fontawesome-webfont.woff2 diff --git a/src/graiph-database/resources/browser/assets/fonts/neo4j-world.eot b/server/src/main/resources/browser/assets/fonts/neo4j-world.eot similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/neo4j-world.eot rename to server/src/main/resources/browser/assets/fonts/neo4j-world.eot diff --git a/src/graiph-database/resources/browser/assets/fonts/neo4j-world.svg b/server/src/main/resources/browser/assets/fonts/neo4j-world.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/neo4j-world.svg rename to server/src/main/resources/browser/assets/fonts/neo4j-world.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/neo4j-world.ttf b/server/src/main/resources/browser/assets/fonts/neo4j-world.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/neo4j-world.ttf rename to server/src/main/resources/browser/assets/fonts/neo4j-world.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/neo4j-world.woff b/server/src/main/resources/browser/assets/fonts/neo4j-world.woff similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/neo4j-world.woff rename to server/src/main/resources/browser/assets/fonts/neo4j-world.woff diff --git 
a/src/graiph-database/resources/browser/assets/fonts/query-plan-operator-cost.svg b/server/src/main/resources/browser/assets/fonts/query-plan-operator-cost.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/query-plan-operator-cost.svg rename to server/src/main/resources/browser/assets/fonts/query-plan-operator-cost.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/query-plan-operator-details.svg b/server/src/main/resources/browser/assets/fonts/query-plan-operator-details.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/query-plan-operator-details.svg rename to server/src/main/resources/browser/assets/fonts/query-plan-operator-details.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/query-plan-operator-rows.svg b/server/src/main/resources/browser/assets/fonts/query-plan-operator-rows.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/query-plan-operator-rows.svg rename to server/src/main/resources/browser/assets/fonts/query-plan-operator-rows.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/query-plan.svg b/server/src/main/resources/browser/assets/fonts/query-plan.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/query-plan.svg rename to server/src/main/resources/browser/assets/fonts/query-plan.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/streamline.eot b/server/src/main/resources/browser/assets/fonts/streamline.eot similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/streamline.eot rename to server/src/main/resources/browser/assets/fonts/streamline.eot diff --git a/src/graiph-database/resources/browser/assets/fonts/streamline.svg b/server/src/main/resources/browser/assets/fonts/streamline.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/streamline.svg rename to server/src/main/resources/browser/assets/fonts/streamline.svg diff --git a/src/graiph-database/resources/browser/assets/fonts/streamline.ttf b/server/src/main/resources/browser/assets/fonts/streamline.ttf similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/streamline.ttf rename to server/src/main/resources/browser/assets/fonts/streamline.ttf diff --git a/src/graiph-database/resources/browser/assets/fonts/streamline.woff b/server/src/main/resources/browser/assets/fonts/streamline.woff similarity index 100% rename from src/graiph-database/resources/browser/assets/fonts/streamline.woff rename to server/src/main/resources/browser/assets/fonts/streamline.woff diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-144x144.png b/server/src/main/resources/browser/assets/images/device-icons/android-chrome-144x144.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-144x144.png rename to server/src/main/resources/browser/assets/images/device-icons/android-chrome-144x144.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-192x192.png b/server/src/main/resources/browser/assets/images/device-icons/android-chrome-192x192.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-192x192.png rename to server/src/main/resources/browser/assets/images/device-icons/android-chrome-192x192.png diff --git 
a/src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-36x36.png b/server/src/main/resources/browser/assets/images/device-icons/android-chrome-36x36.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-36x36.png rename to server/src/main/resources/browser/assets/images/device-icons/android-chrome-36x36.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-48x48.png b/server/src/main/resources/browser/assets/images/device-icons/android-chrome-48x48.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-48x48.png rename to server/src/main/resources/browser/assets/images/device-icons/android-chrome-48x48.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-72x72.png b/server/src/main/resources/browser/assets/images/device-icons/android-chrome-72x72.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-72x72.png rename to server/src/main/resources/browser/assets/images/device-icons/android-chrome-72x72.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-96x96.png b/server/src/main/resources/browser/assets/images/device-icons/android-chrome-96x96.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/android-chrome-96x96.png rename to server/src/main/resources/browser/assets/images/device-icons/android-chrome-96x96.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-114x114.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-114x114.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-114x114.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-114x114.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-120x120.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-120x120.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-120x120.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-120x120.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-144x144.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-144x144.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-144x144.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-144x144.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-152x152.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-152x152.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-152x152.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-152x152.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-180x180.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-180x180.png similarity 
index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-180x180.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-180x180.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-57x57.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-57x57.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-57x57.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-57x57.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-60x60.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-60x60.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-60x60.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-60x60.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-72x72.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-72x72.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-72x72.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-72x72.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-76x76.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-76x76.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-76x76.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-76x76.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-precomposed.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-precomposed.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon-precomposed.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon-precomposed.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon.png b/server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/apple-touch-icon.png rename to server/src/main/resources/browser/assets/images/device-icons/apple-touch-icon.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/browserconfig.xml b/server/src/main/resources/browser/assets/images/device-icons/browserconfig.xml similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/browserconfig.xml rename to server/src/main/resources/browser/assets/images/device-icons/browserconfig.xml diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/favicon-16x16.png b/server/src/main/resources/browser/assets/images/device-icons/favicon-16x16.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/favicon-16x16.png rename to server/src/main/resources/browser/assets/images/device-icons/favicon-16x16.png diff --git 
a/src/graiph-database/resources/browser/assets/images/device-icons/favicon-32x32.png b/server/src/main/resources/browser/assets/images/device-icons/favicon-32x32.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/favicon-32x32.png rename to server/src/main/resources/browser/assets/images/device-icons/favicon-32x32.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/favicon-96x96.png b/server/src/main/resources/browser/assets/images/device-icons/favicon-96x96.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/favicon-96x96.png rename to server/src/main/resources/browser/assets/images/device-icons/favicon-96x96.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/favicon.ico b/server/src/main/resources/browser/assets/images/device-icons/favicon.ico similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/favicon.ico rename to server/src/main/resources/browser/assets/images/device-icons/favicon.ico diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/manifest.json b/server/src/main/resources/browser/assets/images/device-icons/manifest.json similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/manifest.json rename to server/src/main/resources/browser/assets/images/device-icons/manifest.json diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/mstile-144x144.png b/server/src/main/resources/browser/assets/images/device-icons/mstile-144x144.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/mstile-144x144.png rename to server/src/main/resources/browser/assets/images/device-icons/mstile-144x144.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/mstile-150x150.png b/server/src/main/resources/browser/assets/images/device-icons/mstile-150x150.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/mstile-150x150.png rename to server/src/main/resources/browser/assets/images/device-icons/mstile-150x150.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/mstile-310x150.png b/server/src/main/resources/browser/assets/images/device-icons/mstile-310x150.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/mstile-310x150.png rename to server/src/main/resources/browser/assets/images/device-icons/mstile-310x150.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/mstile-310x310.png b/server/src/main/resources/browser/assets/images/device-icons/mstile-310x310.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/mstile-310x310.png rename to server/src/main/resources/browser/assets/images/device-icons/mstile-310x310.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/mstile-70x70.png b/server/src/main/resources/browser/assets/images/device-icons/mstile-70x70.png similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/mstile-70x70.png rename to server/src/main/resources/browser/assets/images/device-icons/mstile-70x70.png diff --git a/src/graiph-database/resources/browser/assets/images/device-icons/neo4j-desktop.svg 
b/server/src/main/resources/browser/assets/images/device-icons/neo4j-desktop.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/images/device-icons/neo4j-desktop.svg rename to server/src/main/resources/browser/assets/images/device-icons/neo4j-desktop.svg diff --git a/src/graiph-database/resources/browser/assets/js/canvg/StackBlur.js b/server/src/main/resources/browser/assets/js/canvg/StackBlur.js similarity index 100% rename from src/graiph-database/resources/browser/assets/js/canvg/StackBlur.js rename to server/src/main/resources/browser/assets/js/canvg/StackBlur.js diff --git a/src/graiph-database/resources/browser/assets/js/canvg/canvg.js b/server/src/main/resources/browser/assets/js/canvg/canvg.js similarity index 100% rename from src/graiph-database/resources/browser/assets/js/canvg/canvg.js rename to server/src/main/resources/browser/assets/js/canvg/canvg.js diff --git a/src/graiph-database/resources/browser/assets/js/canvg/rgbcolor.js b/server/src/main/resources/browser/assets/js/canvg/rgbcolor.js similarity index 100% rename from src/graiph-database/resources/browser/assets/js/canvg/rgbcolor.js rename to server/src/main/resources/browser/assets/js/canvg/rgbcolor.js diff --git a/src/graiph-database/resources/browser/assets/labeled_node-0ae16d31f7f48dd8aad5adb0f2cc162d.png b/server/src/main/resources/browser/assets/labeled_node-0ae16d31f7f48dd8aad5adb0f2cc162d.png similarity index 100% rename from src/graiph-database/resources/browser/assets/labeled_node-0ae16d31f7f48dd8aad5adb0f2cc162d.png rename to server/src/main/resources/browser/assets/labeled_node-0ae16d31f7f48dd8aad5adb0f2cc162d.png diff --git a/src/graiph-database/resources/browser/assets/more_nodes-ffa75b2028a6ccc15ebca2ad38745ec8.png b/server/src/main/resources/browser/assets/more_nodes-ffa75b2028a6ccc15ebca2ad38745ec8.png similarity index 100% rename from src/graiph-database/resources/browser/assets/more_nodes-ffa75b2028a6ccc15ebca2ad38745ec8.png rename to server/src/main/resources/browser/assets/more_nodes-ffa75b2028a6ccc15ebca2ad38745ec8.png diff --git a/src/graiph-database/resources/browser/assets/neo4j-world-16a20d139c28611513ec675e56d16d41.png b/server/src/main/resources/browser/assets/neo4j-world-16a20d139c28611513ec675e56d16d41.png similarity index 100% rename from src/graiph-database/resources/browser/assets/neo4j-world-16a20d139c28611513ec675e56d16d41.png rename to server/src/main/resources/browser/assets/neo4j-world-16a20d139c28611513ec675e56d16d41.png diff --git a/src/graiph-database/resources/browser/assets/one_node-d6c52c9505f9fc7fe0d2d11f4eeb7387.png b/server/src/main/resources/browser/assets/one_node-d6c52c9505f9fc7fe0d2d11f4eeb7387.png similarity index 100% rename from src/graiph-database/resources/browser/assets/one_node-d6c52c9505f9fc7fe0d2d11f4eeb7387.png rename to server/src/main/resources/browser/assets/one_node-d6c52c9505f9fc7fe0d2d11f4eeb7387.png diff --git a/src/graiph-database/resources/browser/assets/order-graph-10ba26057f99c821a6bdaf4148acbc78.png b/server/src/main/resources/browser/assets/order-graph-10ba26057f99c821a6bdaf4148acbc78.png similarity index 100% rename from src/graiph-database/resources/browser/assets/order-graph-10ba26057f99c821a6bdaf4148acbc78.png rename to server/src/main/resources/browser/assets/order-graph-10ba26057f99c821a6bdaf4148acbc78.png diff --git a/src/graiph-database/resources/browser/assets/product-category-supplier-b5a40101da41fc85d63062c8cb4db224.png 
b/server/src/main/resources/browser/assets/product-category-supplier-b5a40101da41fc85d63062c8cb4db224.png similarity index 100% rename from src/graiph-database/resources/browser/assets/product-category-supplier-b5a40101da41fc85d63062c8cb4db224.png rename to server/src/main/resources/browser/assets/product-category-supplier-b5a40101da41fc85d63062c8cb4db224.png diff --git a/src/graiph-database/resources/browser/assets/product-graph-c8122b0ef629f6164833602056e3e577.png b/server/src/main/resources/browser/assets/product-graph-c8122b0ef629f6164833602056e3e577.png similarity index 100% rename from src/graiph-database/resources/browser/assets/product-graph-c8122b0ef629f6164833602056e3e577.png rename to server/src/main/resources/browser/assets/product-graph-c8122b0ef629f6164833602056e3e577.png diff --git a/src/graiph-database/resources/browser/assets/query-plan-1dbe2ddf9e7148d121cd60dc96f15389.svg b/server/src/main/resources/browser/assets/query-plan-1dbe2ddf9e7148d121cd60dc96f15389.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/query-plan-1dbe2ddf9e7148d121cd60dc96f15389.svg rename to server/src/main/resources/browser/assets/query-plan-1dbe2ddf9e7148d121cd60dc96f15389.svg diff --git a/src/graiph-database/resources/browser/assets/query-plan-operator-cost-569d20fb755616dfa9c4b02bbff8e926.svg b/server/src/main/resources/browser/assets/query-plan-operator-cost-569d20fb755616dfa9c4b02bbff8e926.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/query-plan-operator-cost-569d20fb755616dfa9c4b02bbff8e926.svg rename to server/src/main/resources/browser/assets/query-plan-operator-cost-569d20fb755616dfa9c4b02bbff8e926.svg diff --git a/src/graiph-database/resources/browser/assets/query-plan-operator-details-b61fa6e320448faf748ef91b3a2125d5.svg b/server/src/main/resources/browser/assets/query-plan-operator-details-b61fa6e320448faf748ef91b3a2125d5.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/query-plan-operator-details-b61fa6e320448faf748ef91b3a2125d5.svg rename to server/src/main/resources/browser/assets/query-plan-operator-details-b61fa6e320448faf748ef91b3a2125d5.svg diff --git a/src/graiph-database/resources/browser/assets/query-plan-operator-rows-3755f84215d4c7e2b35aa8286a458fa9.svg b/server/src/main/resources/browser/assets/query-plan-operator-rows-3755f84215d4c7e2b35aa8286a458fa9.svg similarity index 100% rename from src/graiph-database/resources/browser/assets/query-plan-operator-rows-3755f84215d4c7e2b35aa8286a458fa9.svg rename to server/src/main/resources/browser/assets/query-plan-operator-rows-3755f84215d4c7e2b35aa8286a458fa9.svg diff --git a/src/graiph-database/resources/browser/assets/rel-props-f74e4cfe9d44f59af1c4c1eb228139b9.png b/server/src/main/resources/browser/assets/rel-props-f74e4cfe9d44f59af1c4c1eb228139b9.png similarity index 100% rename from src/graiph-database/resources/browser/assets/rel-props-f74e4cfe9d44f59af1c4c1eb228139b9.png rename to server/src/main/resources/browser/assets/rel-props-f74e4cfe9d44f59af1c4c1eb228139b9.png diff --git a/src/graiph-database/resources/browser/assets/relationships-3046fd08a53dc14b736c08ab0f518d7f.png b/server/src/main/resources/browser/assets/relationships-3046fd08a53dc14b736c08ab0f518d7f.png similarity index 100% rename from src/graiph-database/resources/browser/assets/relationships-3046fd08a53dc14b736c08ab0f518d7f.png rename to server/src/main/resources/browser/assets/relationships-3046fd08a53dc14b736c08ab0f518d7f.png diff --git 
a/src/graiph-database/resources/browser/assets/screen_code_frame-e2c698df376b5aa8cc07cd7f8ccae2ca.png b/server/src/main/resources/browser/assets/screen_code_frame-e2c698df376b5aa8cc07cd7f8ccae2ca.png similarity index 100% rename from src/graiph-database/resources/browser/assets/screen_code_frame-e2c698df376b5aa8cc07cd7f8ccae2ca.png rename to server/src/main/resources/browser/assets/screen_code_frame-e2c698df376b5aa8cc07cd7f8ccae2ca.png diff --git a/src/graiph-database/resources/browser/assets/screen_cypher_warn-a09604053edeb1b7d04adf56f1b3571b.png b/server/src/main/resources/browser/assets/screen_cypher_warn-a09604053edeb1b7d04adf56f1b3571b.png similarity index 100% rename from src/graiph-database/resources/browser/assets/screen_cypher_warn-a09604053edeb1b7d04adf56f1b3571b.png rename to server/src/main/resources/browser/assets/screen_cypher_warn-a09604053edeb1b7d04adf56f1b3571b.png diff --git a/src/graiph-database/resources/browser/assets/screen_editor-d6346b5cb91871943844584df77695ce.png b/server/src/main/resources/browser/assets/screen_editor-d6346b5cb91871943844584df77695ce.png similarity index 100% rename from src/graiph-database/resources/browser/assets/screen_editor-d6346b5cb91871943844584df77695ce.png rename to server/src/main/resources/browser/assets/screen_editor-d6346b5cb91871943844584df77695ce.png diff --git a/src/graiph-database/resources/browser/assets/screen_sidebar-045cbffb3261a8351cf29683f4742325.png b/server/src/main/resources/browser/assets/screen_sidebar-045cbffb3261a8351cf29683f4742325.png similarity index 100% rename from src/graiph-database/resources/browser/assets/screen_sidebar-045cbffb3261a8351cf29683f4742325.png rename to server/src/main/resources/browser/assets/screen_sidebar-045cbffb3261a8351cf29683f4742325.png diff --git a/src/graiph-database/resources/browser/assets/screen_stream-b84fff4144d58f194851153b9ddc9380.png b/server/src/main/resources/browser/assets/screen_stream-b84fff4144d58f194851153b9ddc9380.png similarity index 100% rename from src/graiph-database/resources/browser/assets/screen_stream-b84fff4144d58f194851153b9ddc9380.png rename to server/src/main/resources/browser/assets/screen_stream-b84fff4144d58f194851153b9ddc9380.png diff --git a/src/graiph-database/resources/browser/index.html b/server/src/main/resources/browser/index.html similarity index 100% rename from src/graiph-database/resources/browser/index.html rename to server/src/main/resources/browser/index.html diff --git a/src/graiph-database/resources/browser/main.chunkhash.bundle.js b/server/src/main/resources/browser/main.chunkhash.bundle.js similarity index 100% rename from src/graiph-database/resources/browser/main.chunkhash.bundle.js rename to server/src/main/resources/browser/main.chunkhash.bundle.js diff --git a/src/graiph-database/resources/browser/manifest.json b/server/src/main/resources/browser/manifest.json similarity index 100% rename from src/graiph-database/resources/browser/manifest.json rename to server/src/main/resources/browser/manifest.json diff --git a/src/graiph-database/resources/browser/sync-manager.chunkhash.bundle.js b/server/src/main/resources/browser/sync-manager.chunkhash.bundle.js similarity index 100% rename from src/graiph-database/resources/browser/sync-manager.chunkhash.bundle.js rename to server/src/main/resources/browser/sync-manager.chunkhash.bundle.js diff --git a/src/graiph-database/resources/browser/vendors~main.chunkhash.bundle.js b/server/src/main/resources/browser/vendors~main.chunkhash.bundle.js similarity index 100% rename from 
src/graiph-database/resources/browser/vendors~main.chunkhash.bundle.js rename to server/src/main/resources/browser/vendors~main.chunkhash.bundle.js diff --git a/server/src/main/resources/logo.txt b/server/src/main/resources/logo.txt new file mode 100644 index 00000000..19db1af4 --- /dev/null +++ b/server/src/main/resources/logo.txt @@ -0,0 +1,9 @@ + + ______ _ _____ ______ +(_____ \ | | (____ \ (____ \ + _____) )___ ____ _ | | ____ _ \ \ ____) ) +| ____/ _ | _ \ / || |/ _ | | | | __ ( +| | ( ( | | | | ( (_| ( ( | | |__/ /| |__) ) +|_| \_||_|_| |_|\____|\_||_|_____/ |______/ + +PandaDB Node Server (ver 0.0.2.20200320) diff --git a/server/src/main/scala/cn/pandadb/server/ClusterLog.scala b/server/src/main/scala/cn/pandadb/server/ClusterLog.scala new file mode 100644 index 00000000..4d8f9501 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/ClusterLog.scala @@ -0,0 +1,79 @@ +package cn.pandadb.server + +import java.io._ + +import com.google.gson.Gson + +import scala.io.Source + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 13:10 2019/11/30 + * @Modified By: + */ + +case class DataLogDetail(val versionNum: Int, val command: String) { + +} + +trait DataLogWriter { + def write(row: DataLogDetail): Unit; + def getLastVersion(): Int; +} + +trait DataLogReader { + def consume[T](consumer: (DataLogDetail) => T, sinceVersion: Int = -1): Iterable[T]; + def getLastVersion(): Int; +} + +object JsonDataLogRW { + def open(file: File): JsonDataLogRW = { + if (!file.exists) { + file.getParentFile.mkdirs() + file.createNewFile() + } + new JsonDataLogRW(file) + } +} + +class JsonDataLogRW(logFile: File) extends DataLogWriter with DataLogReader { + + val logFIleIter: Iterator[String] = Source.fromFile(logFile).getLines() + + val _gson = new Gson() + + private var lastVersion: Int = { + if (logFile.length() == 0) { + -1 + } else { + var _tempVersion = -1 + val _iter = Source.fromFile(logFile).getLines() + _iter.foreach(line => { + val _lineSerialNum = _gson.fromJson(line, new DataLogDetail(0, "").getClass).versionNum + if(_lineSerialNum > _tempVersion) { + _tempVersion = _lineSerialNum + } + }) + _tempVersion + } + } + + override def consume[T](consumer: DataLogDetail => T, sinceVersion: Int): Iterable[T] = { + logFIleIter.toIterable.map(line => _gson.fromJson(line, new DataLogDetail(0, "").getClass)) + .filter(dataLogDetail => dataLogDetail.versionNum > sinceVersion) + .map(consumer(_)) + } + + override def write(row: DataLogDetail): Unit = { + lastVersion += 1 + val _fileAppender = new FileWriter(logFile, true) + _fileAppender.append(s"${_gson.toJson(row)}\n"); + _fileAppender.flush() + _fileAppender.close() + } + + override def getLastVersion(): Int = { + lastVersion + } +} diff --git a/server/src/main/scala/cn/pandadb/server/DataVersionRecovery.scala b/server/src/main/scala/cn/pandadb/server/DataVersionRecovery.scala new file mode 100644 index 00000000..342ecb94 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/DataVersionRecovery.scala @@ -0,0 +1,43 @@ +package cn.pandadb.server + +import java.io.File + +import cn.pandadb.network.NodeAddress +import org.neo4j.driver.GraphDatabase + +/** + * @Author: Airzihao + * @Description: + * @Date: Created in 23:06 2019/12/2 + * @Modified By: + */ + +case class DataVersionRecoveryArgs(val localLogFile: File, val clusterLogFile: File, + val localNodeAddress: NodeAddress) + +class LocalDataVersionRecovery(args: DataVersionRecoveryArgs) { + val localLog = new JsonDataLogRW(args.localLogFile) + val sinceVersion: Int = 
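+    // Recovery is incremental: only cluster-log entries newer than the local
+    // log's last version are replayed below.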
localLog.getLastVersion() + val clusterLog = new JsonDataLogRW(args.clusterLogFile) + val clusterVersion: Int = clusterLog.getLastVersion() + + private def _collectCypherList(): List[String] = { + clusterLog.consume(logItem => logItem.command, sinceVersion).toList + } + + def updateLocalVersion(): Unit = { + if (clusterVersion > sinceVersion) { + val cypherList = _collectCypherList() + val boltURI = s"bolt://" + args.localNodeAddress.getAsString + val driver = GraphDatabase.driver(boltURI) + val session = driver.session() + cypherList.foreach(cypher => { + val _tx = session.beginTransaction() + _tx.run(cypher) + _tx.success() + _tx.close() + }) + session.close() + } + } +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/MasterRole.scala b/server/src/main/scala/cn/pandadb/server/MasterRole.scala new file mode 100644 index 00000000..ce6b6de2 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/MasterRole.scala @@ -0,0 +1,142 @@ +package cn.pandadb.server + +import cn.pandadb.network._ +import org.apache.zookeeper.{CreateMode, ZooDefs} +import org.neo4j.driver._ + +import scala.collection.mutable.ListBuffer +import scala.concurrent.{Await, Future} +import scala.concurrent.duration._ +import scala.concurrent.ExecutionContext.Implicits.global + +/** + * @Author: Airzihao + * @Description: This class is instanced when a node is selected as the master node. + * @Date: Created at 13:13 2019/11/27 + * @Modified By: + */ + +trait Master { + + var allNodes: Iterable[NodeAddress] + + val clusterClient: ClusterClient + + var globalWriteLock: NaiveLock + + var globalReadLock: NaiveLock + + var listenerList: List[ZKClusterEventListener] + + def addListener(listener: ZKClusterEventListener) + + def clusterWrite(cypher: String) +} + +class MasterRole(zkClusterClient: ZookeeperBasedClusterClient, localAddress: NodeAddress) extends Master { + + val localNodeAddress = localAddress + override var listenerList: List[ZKClusterEventListener] = _ + + // how to init it? 
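+  // clusterWrite below drives the state machine Writing -> Finished -> UnlockedServing,
+  // so `new UnlockedServing` would be one plausible (unverified) initial value here.
+  // Rough usage sketch, using only names defined in this module (the address is made up):
+  //   val master = new MasterRole(zkClusterClient, NodeAddress.fromString("127.0.0.1:7687"))
+  //   master.clusterWrite("CREATE (n:Panda {name:'po'})")      // fans out to every other node
+  //   val rs = master.clusterRead("MATCH (n) RETURN count(n)") // served by a non-master node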
+ private var currentState: ClusterState = new ClusterState {} + override val clusterClient = zkClusterClient + val masterNodeAddress = localAddress.getAsString + override var allNodes: Iterable[NodeAddress] = clusterClient.getAllNodes() + override var globalReadLock: NaiveLock = new NaiveReadLock(clusterClient) + override var globalWriteLock: NaiveLock = new NaiveWriteLock(clusterClient) + + private def initWriteContext(): Unit = { + globalReadLock = new NaiveReadLock(clusterClient) + globalWriteLock = new NaiveWriteLock(clusterClient) + } + + def setClusterState(state: ClusterState): Unit = { + currentState = state + } + + private def distributeWriteStatement(cypher: String): Unit = { + var tempResult: StatementResult = null + var futureTasks = new ListBuffer[Future[Boolean]] + for (nodeAddress <- allNodes) { + if (nodeAddress.getAsString != masterNodeAddress) { + val future = Future[Boolean] { + try { + val uri = s"bolt://" + nodeAddress.getAsString + val driver = GraphDatabase.driver(uri, + AuthTokens.basic("", "")) + val session = driver.session() + val tx = session.beginTransaction() + tempResult = tx.run(cypher) + tx.success() + tx.close() + session.close() + true + } catch { + case e: Exception => + throw new Exception("Write-cluster operation failed.") + false + } + } + futureTasks.append(future) + } + } + futureTasks.foreach(future => Await.result(future, 3.seconds)) + } + + // TODO finetune the state change mechanism + override def clusterWrite(cypher: String): Unit = { + val preVersion = zkClusterClient.getClusterDataVersion() + initWriteContext() + setClusterState(new Writing) + allNodes = clusterClient.getAllNodes() + globalWriteLock.lock() + // key func + distributeWriteStatement(cypher) + globalWriteLock.unlock() + setClusterState(new Finished) + setClusterState(new UnlockedServing) + val curVersion = preVersion + 1 + _setDataVersion(curVersion) + } + + def clusterRead(cypher: String): StatementResult = { + val iter = allNodes.iterator + var statementResult: StatementResult = null; + while (iter.hasNext) { + val str = iter.next().getAsString + if( str != masterNodeAddress) { + val uri = s"bolt://" + str + val driver = GraphDatabase.driver(uri) + statementResult = driver.session().run(cypher) + } + } + statementResult + } + + override def addListener(listener: ZKClusterEventListener): Unit = { + listenerList = listener :: listenerList + } + + private def _setDataVersion(curVersion: Int): Unit = { + _updateFreshNode() + clusterClient.curator.setData().forPath(ZKPathConfig.dataVersionPath, BytesTransform.serialize(curVersion)) + } + + private def _updateFreshNode(): Unit = { + val children = clusterClient.curator.getChildren.forPath(ZKPathConfig.freshNodePath) + // delete old node + if(children.isEmpty == false) { + val child = children.iterator() + while (child.hasNext) { + val fullPath = ZKPathConfig.freshNodePath + "/" + child.next() + clusterClient.curator.delete().forPath(fullPath) + } + } + val curFreshNodeRpc = MainServerContext.nodeAddress.getAsString + clusterClient.curator.create().creatingParentsIfNeeded() + .withMode(CreateMode.PERSISTENT) + .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE) + .forPath(ZKPathConfig.freshNodePath + s"/" + curFreshNodeRpc) + } +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/NaiveLock.scala b/server/src/main/scala/cn/pandadb/server/NaiveLock.scala new file mode 100644 index 00000000..bcb9001e --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/NaiveLock.scala @@ -0,0 +1,74 @@ +package cn.pandadb.server + 
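+// Note: the "locks" in this file are not in-process mutexes. A node is locked by
+// removing its address from the ZooKeeper registry (unRegister*), so clients stop
+// routing statements to it, and unlocked by registering the address again.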
+import cn.pandadb.network.{NodeAddress, ZookeeperBasedClusterClient} + +/** + * @Author: Airzihao + * @Description: + * @Date: Created in 23:28 2019/11/27 + * @Modified By: + */ +trait NaiveLock { + + def lock() + def unlock() +} + +class NaiveWriteLock(clusterClient: ZookeeperBasedClusterClient) extends NaiveLock { + + val allNodes = clusterClient.getAllNodes() + var nodeList = allNodes.toList + val masterNodeAddress: NodeAddress = clusterClient.getWriteMasterNode("").get + val register = new ZKServiceRegistry(clusterClient.zkServerAddress) + + override def lock(): Unit = { + while (nodeList.length == 0) { + Thread.sleep(1000) + nodeList = clusterClient.getAllNodes().toList + } + nodeList.foreach(lockOrdinaryNode(_)) + lockLeaderNode(masterNodeAddress) + } + + override def unlock(): Unit = { + nodeList.foreach(unlockOrdinaryNode(_)) + unlockLeaderNode(masterNodeAddress) + } + + def lockOrdinaryNode(node: NodeAddress): Unit = { + register.unRegisterOrdinaryNode(node) + } + + def lockLeaderNode(node: NodeAddress): Unit = { + register.unRegisterLeaderNode(node) + } + + def unlockOrdinaryNode(node: NodeAddress): Unit = { + register.registerAsOrdinaryNode(node) + } + def unlockLeaderNode(node: NodeAddress): Unit = { + register.registerAsLeader(node) + } +} + +class NaiveReadLock(clusterClient: ZookeeperBasedClusterClient) extends NaiveLock { + + val register = new ZKServiceRegistry(clusterClient.zkServerAddress) + var masterNodeAddress: NodeAddress = _ + + override def lock(): Unit = { + masterNodeAddress = clusterClient.getWriteMasterNode("").get + lockLeaderNode(masterNodeAddress) + } + + override def unlock(): Unit = { + unlockLeaderNode(masterNodeAddress) + } + + def lockLeaderNode(node: NodeAddress): Unit = { + register.unRegisterLeaderNode(node) + } + def unlockLeaderNode(node: NodeAddress): Unit = { + register.registerAsLeader(node) + } +} diff --git a/server/src/main/scala/cn/pandadb/server/PNodeServer.scala b/server/src/main/scala/cn/pandadb/server/PNodeServer.scala new file mode 100644 index 00000000..fe40df24 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/PNodeServer.scala @@ -0,0 +1,149 @@ +package cn.pandadb.server + +import java.io.{File, FileInputStream} +import java.util.concurrent.CountDownLatch +import java.util.{Optional, Properties} + +import cn.pandadb.blob.BlobStorageModule +import cn.pandadb.cypherplus.CypherPlusModule +import cn.pandadb.externalprops.ExternalPropertiesModule +import cn.pandadb.network.{NodeAddress, ZKPathConfig, ZookeeperBasedClusterClient} +import cn.pandadb.server.internode.InterNodeRequestHandler +import cn.pandadb.server.neo4j.Neo4jRequestHandler +import cn.pandadb.server.rpc.{NettyRpcServer, PNodeRpcClient} +import cn.pandadb.util._ +import org.apache.commons.io.IOUtils +import org.apache.curator.framework.CuratorFramework +import org.apache.curator.framework.recipes.leader.{LeaderSelector, LeaderSelectorListenerAdapter} +import org.neo4j.driver.GraphDatabase +import org.neo4j.server.CommunityBootstrapper + +import scala.collection.JavaConversions + +/** + * Created by bluejoe on 2019/7/17. 
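+ *
+ * A PNodeServer embeds a Neo4j CommunityBootstrapper, starts the PandaDB RPC kernel
+ * (NettyRpcServer), replays any data-log entries it is missing, and then joins the
+ * ZooKeeper-based leader election (see takeLeadership below).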
+ */ +object PNodeServer extends Logging { + val logo = IOUtils.toString(this.getClass.getClassLoader.getResourceAsStream("logo.txt"), "utf-8"); + + def startServer(dbDir: File, configFile: File, overrided: Map[String, String] = Map()): PNodeServer = { + val props = new Properties() + props.load(new FileInputStream(configFile)) + val server = new PNodeServer(dbDir, JavaConversions.propertiesAsScalaMap(props).toMap ++ overrided); + server.start(); + server; + } +} + +class PNodeServer(dbDir: File, props: Map[String, String]) + extends LeaderSelectorListenerAdapter with Logging { + //TODO: we will replace neo4jServer with InterNodeRpcServer someday!! + val neo4jServer = new CommunityBootstrapper(); + val runningLock = new CountDownLatch(1) + + val modules = new PandaModules(); + val context = new ContextMap(); + + val config = new Configuration() { + override def getRaw(name: String): Option[String] = props.get(name) + } + + val pmc = PandaModuleContext(config, dbDir, context); + + modules.add(new MainServerModule()) + .add(new BlobStorageModule()) + .add(new ExternalPropertiesModule()) + .add(new CypherPlusModule()) + + modules.init(pmc); + + val np = MainServerContext.nodeAddress + + val serverKernel = new NettyRpcServer("0.0.0.0", MainServerContext.rpcPort, "PNodeRpc-service"); + serverKernel.accept(Neo4jRequestHandler()); + serverKernel.accept(InterNodeRequestHandler()); + + val dataLogRW = JsonDataLogRW.open(new File(dbDir, "dataVersionLog.json")) + + MainServerContext.bindDataLogReaderWriter(dataLogRW, dataLogRW) + val clusterClient: ZookeeperBasedClusterClient = new ZookeeperBasedClusterClient(MainServerContext.zkServerAddressStr) + + def start(): Unit = { + Runtime.getRuntime().addShutdownHook(new Thread() { + override def run(): Unit = { + shutdown(); + } + }); + + neo4jServer.start(dbDir, Optional.empty(), + JavaConversions.mapAsJavaMap(props + ("dbms.connector.bolt.listen_address" -> np.getAsString))); + + modules.start(pmc); + //FIXME: watch dog is not a PNode + if (!GlobalContext.isWatchDog()) { + serverKernel.start({ + //scalastyle:off + println(PNodeServer.logo); + + if (_isUpToDate() == false) { + _updateLocalData() + } + _joinInLeaderSelection() + new ZKServiceRegistry(MainServerContext.zkServerAddressStr).registerAsOrdinaryNode(np) + }) + } + } + + def shutdown(): Unit = { + modules.close(pmc); + runningLock.countDown() + serverKernel.shutdown(); + } + + override def takeLeadership(curatorFramework: CuratorFramework): Unit = { + + new ZKServiceRegistry(MainServerContext.zkServerAddressStr).registerAsLeader(np) + val masterRole = new MasterRole(clusterClient, np) + MainServerContext.bindMasterRole(masterRole) + + logger.debug(s"taken leader ship..."); + //yes, i won't quit, never! + runningLock.await() + logger.debug(s"shutdown..."); + } + + private def _joinInLeaderSelection(): Unit = { + val leaderSelector = new LeaderSelector(clusterClient.curator, ZKPathConfig.registryPath + "/_leader", this); + leaderSelector.start(); + } + + private def _isUpToDate(): Boolean = { + dataLogRW.getLastVersion() == clusterClient.getClusterDataVersion() + } + + //FIXME: updata->update + private def _updateLocalData(): Unit = { + // if can't get now, wait here. 
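+    // Catch-up on startup: pull the data-log entries this node missed from the freshest
+    // node in the cluster, replay each logged cypher command against the local bolt
+    // endpoint, and record every applied entry in the local data log.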
+ val cypherArray = _getRemoteLogs() + + val localDriver = GraphDatabase.driver(s"bolt://${np.getAsString}") + val session = localDriver.session() + cypherArray.foreach(logItem => { + val tx = session.beginTransaction() + try { + val localPreVersion = dataLogRW.getLastVersion() + tx.run(logItem.command) + tx.success() + tx.close() + dataLogRW.write(logItem) + } + }) + } + + // todo: Iterable[] + private def _getRemoteLogs(): Iterable[DataLogDetail] = { + val lastFreshNodeIP = clusterClient.getFreshNodeIp() + val rpcClient = PNodeRpcClient.connect(NodeAddress.fromString(lastFreshNodeIP)) + rpcClient.getRemoteLogs(dataLogRW.getLastVersion()) + } +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/ServiceRegistry.scala b/server/src/main/scala/cn/pandadb/server/ServiceRegistry.scala new file mode 100644 index 00000000..0a4269ee --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/ServiceRegistry.scala @@ -0,0 +1,88 @@ +package cn.pandadb.server + +import cn.pandadb.network.{NodeAddress, ZKPathConfig} +import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import org.apache.curator.retry.ExponentialBackoffRetry +import org.apache.zookeeper.{CreateMode, ZooDefs} + +trait ServiceRegistry { + + def registry(servicePath: String, localNodeAddress: String) +} + +class ZKServiceRegistry(zkString: String) extends ServiceRegistry { + + var localNodeAddress: String = _ + val zkServerAddress = zkString + val curator: CuratorFramework = CuratorFrameworkFactory.newClient(zkServerAddress, + new ExponentialBackoffRetry(1000, 3)); + curator.start() + + def registry(servicePath: String, localNodeAddress: String): Unit = { + val registryPath = ZKPathConfig.registryPath + val nodeAddress = servicePath + s"/" + localNodeAddress + /* + * node mode in zk: + * pandaDB + * / | \ + * / | \ + * ordinaryNodes leader data + * / | \ + * addresses leaderAddress version(not implemented) + * + */ + + // Create registry node (pandanode, persistent) + if(curator.checkExists().forPath(registryPath) == null) { + curator.create() + .creatingParentsIfNeeded() + .withMode(CreateMode.PERSISTENT) + .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE) + .forPath(registryPath) + } + + // Create service node (persistent) + if(curator.checkExists().forPath(servicePath) == null) { + curator.create() + .creatingParentsIfNeeded() + .withMode(CreateMode.PERSISTENT) + .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE) + .forPath(servicePath) + } + + // Create address node (temp) + if(curator.checkExists().forPath(nodeAddress) == null) { + curator.create() + .creatingParentsIfNeeded() + .withMode(CreateMode.EPHEMERAL) + .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE) + .forPath(nodeAddress) + } + } + + def registerAsOrdinaryNode(nodeAddress: NodeAddress): Unit = { + localNodeAddress = nodeAddress.getAsString + registry(ZKPathConfig.ordinaryNodesPath, localNodeAddress) + } + + def registerAsLeader(nodeAddress: NodeAddress): Unit = { + localNodeAddress = nodeAddress.getAsString + registry(ZKPathConfig.leaderNodePath, localNodeAddress) + } + + def unRegisterOrdinaryNode(node: NodeAddress): Unit = { + val nodeAddress = node.getAsString + val ordinaryNodePath = ZKPathConfig.ordinaryNodesPath + s"/" + nodeAddress + if(curator.checkExists().forPath(ordinaryNodePath) != null) { + curator.delete().forPath(ordinaryNodePath) + } + } + + def unRegisterLeaderNode(node: NodeAddress): Unit = { + val nodeAddress = node.getAsString + val leaderNodePath = ZKPathConfig.leaderNodePath + s"/" + nodeAddress + 
if(curator.checkExists().forPath(leaderNodePath) != null) { + curator.delete().forPath(leaderNodePath) + } + } +} diff --git a/server/src/main/scala/cn/pandadb/server/internode/PNodeStatementProcessor.scala b/server/src/main/scala/cn/pandadb/server/internode/PNodeStatementProcessor.scala new file mode 100644 index 00000000..ec72b14f --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/internode/PNodeStatementProcessor.scala @@ -0,0 +1,88 @@ +package cn.pandadb.server.internode + +import java.time.Duration +import java.util + +import cn.pandadb.cypherplus.utils.CypherPlusUtils +import cn.pandadb.server.{MainServerContext, DataLogDetail} +import cn.pandadb.util.GlobalContext +import org.neo4j.bolt.runtime.{BoltResult, StatementMetadata, StatementProcessor, TransactionStateMachineSPI} +import org.neo4j.bolt.v1.runtime.bookmarking.Bookmark +import org.neo4j.function.{ThrowingBiConsumer, ThrowingConsumer} +import org.neo4j.kernel.impl.util.ValueUtils +import org.neo4j.values.AnyValue +import org.neo4j.values.virtual.MapValue + +import scala.collection.{JavaConversions, mutable} + +/** + * Created by bluejoe on 2019/11/4. + */ +class PNodeStatementProcessor(source: StatementProcessor, spi: TransactionStateMachineSPI) extends StatementProcessor { + + override def markCurrentTransactionForTermination(): Unit = source.markCurrentTransactionForTermination() + + override def commitTransaction(): Bookmark = source.commitTransaction() + + override def run(statement: String, params: MapValue): StatementMetadata = source.run(statement, params) + + override def run(statement: String, params: MapValue, bookmark: Bookmark, txTimeout: Duration, + txMetaData: util.Map[String, AnyRef]): StatementMetadata = { + + // param transformation, contribute by codeBabyLin + val paramMap = new mutable.HashMap[String, AnyRef]() + val myConsumer = new ThrowingBiConsumer[String, AnyValue, Exception]() { + override def accept(var1: String, var2: AnyValue): Unit = { + val key = var1 + val value = ValueUtils.asValue(var2).asObject() + paramMap.update(key, value) + } + } + params.foreach(myConsumer) + val mapTrans = JavaConversions.mapAsJavaMap(paramMap) + + //pickup a runnable node + if (CypherPlusUtils.isWriteStatement(statement)) { + if (GlobalContext.isLeaderNode) { + val masterRole = MainServerContext.masterRole + masterRole.clusterWrite(statement) + } + val metaData = source.run(statement, params) + val curVersion = _getLocalDataVersion() + 1 + _writeDataLog(curVersion, statement) + metaData + } else { + source.run(statement, params) + } + } + + private def _getLocalDataVersion(): Int = { + MainServerContext.dataLogWriter.getLastVersion + } + + // pandaDB + private def _writeDataLog(curVersion: Int, cypher: String): Unit = { + val logItem = new DataLogDetail(curVersion, cypher) + MainServerContext.dataLogWriter.write(logItem) + } + + override def streamResult(resultConsumer: ThrowingConsumer[BoltResult, Exception]): Bookmark = { + source.streamResult(resultConsumer) + } + + override def hasOpenStatement: Boolean = source.hasOpenStatement + + override def rollbackTransaction(): Unit = source.rollbackTransaction() + + override def hasTransaction: Boolean = source.hasTransaction + + override def reset(): Unit = source.reset() + + override def validateTransaction(): Unit = source.validateTransaction() + + override def beginTransaction(bookmark: Bookmark): Unit = source.beginTransaction(bookmark) + + override def beginTransaction(bookmark: Bookmark, txTimeout: Duration, txMetadata: util.Map[String, AnyRef]): Unit = + 
source.beginTransaction(bookmark, txTimeout, txMetadata) + +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/internode/internode.scala b/server/src/main/scala/cn/pandadb/server/internode/internode.scala new file mode 100644 index 00000000..f8eeb225 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/internode/internode.scala @@ -0,0 +1,23 @@ +package cn.pandadb.server.internode + +import cn.pandadb.network.internal.message.{InternalRpcRequest, InternalRpcResponse} +import cn.pandadb.server.rpc.RequestHandler +import cn.pandadb.server.{DataLogDetail, MainServerContext} + +/** + * Created by bluejoe on 2019/11/25. + */ +case class InterNodeRequestHandler() extends RequestHandler { + override val logic: PartialFunction[InternalRpcRequest, InternalRpcResponse] = { + case GetLogDetailsRequest(sinceVersion: Int) => + GetLogDetailsResponse(MainServerContext.dataLogReader.consume(logItem => logItem, sinceVersion)) + } +} + +case class GetLogDetailsRequest(sinceVersion: Int) extends InternalRpcRequest { + +} + +case class GetLogDetailsResponse(logs: Iterable[DataLogDetail]) extends InternalRpcResponse { + +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/module.scala b/server/src/main/scala/cn/pandadb/server/module.scala new file mode 100644 index 00000000..3869cd3e --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/module.scala @@ -0,0 +1,55 @@ +package cn.pandadb.server + +import cn.pandadb.network.{ClusterClient, NodeAddress, ZKPathConfig} +import cn.pandadb.util._ + +class MainServerModule extends PandaModule { + override def init(ctx: PandaModuleContext): Unit = { + val conf = ctx.configuration; + import ConfigUtils._ + MainServerContext.bindNodeAddress(NodeAddress.fromString(conf.getRequiredValueAsString("node.server.address"))); + MainServerContext.bindZKServerAddressStr(conf.getRequiredValueAsString("zookeeper.address")) + MainServerContext.bingRpcPort(conf.getRequiredValueAsInt("rpc.port")) + ZKPathConfig.initZKPath(MainServerContext.zkServerAddressStr) + } + + override def close(ctx: PandaModuleContext): Unit = { + + } + + override def start(ctx: PandaModuleContext): Unit = { + + } +} + +object MainServerContext extends ContextMap { + def bindMasterRole(role: MasterRole): Unit = { + GlobalContext.setLeaderNode(true) + super.put[MasterRole](role); + } + + def bindDataLogReaderWriter(logReader: DataLogReader, logWriter: DataLogWriter): Unit = { + super.put[DataLogReader](logReader) + super.put[DataLogWriter](logWriter) + } + + def bindZKServerAddressStr(zkAddressString: String): Unit = put("zookeeper.address", zkAddressString) + + def dataLogWriter: DataLogWriter = super.get[DataLogWriter] + + def dataLogReader: DataLogReader = super.get[DataLogReader] + + def bindNodeAddress(nodeAddress: NodeAddress): Unit = put("node.server.address", nodeAddress); + + def bingRpcPort(port: Int): Unit = put("rpcPort", port) + + def rpcPort: Int = get("rpcPort") + + def nodeAddress: NodeAddress = get("node.server.address"); + + def zkServerAddressStr: String = get("zookeeper.address"); + + def masterRole: MasterRole = super.get[MasterRole] + + def clusterClient: ClusterClient = super.get[ClusterClient] +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/neo4j/Neo4jAgent.scala b/server/src/main/scala/cn/pandadb/server/neo4j/Neo4jAgent.scala new file mode 100644 index 00000000..1f742db8 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/neo4j/Neo4jAgent.scala @@ -0,0 +1,39 @@ +package 
cn.pandadb.server.neo4j + +import cn.pandadb.network.internal.message.{InternalRpcRequest, InternalRpcResponse} +import cn.pandadb.server.rpc.RequestHandler + +/** + * Created by bluejoe on 2019/11/25. + */ +case class Neo4jRequestHandler() extends RequestHandler { + override val logic: PartialFunction[InternalRpcRequest, InternalRpcResponse] = { + //example code + case RunCommandRequest(command: String) => + RunCommandResponse(Array()) + } +} + +case class RunCommandRequest(command: String) extends InternalRpcRequest { + +} + +case class RunCommandResponse(results: Array[Result]) extends InternalRpcResponse { + +} + +case class BeginTransactionRequest() extends InternalRpcRequest { + +} + +case class CloseTransactionRequest() extends InternalRpcRequest { + +} + +class Result { + +} + +case class ServerSideExceptionResponse(msg: String) extends InternalRpcResponse { + +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/rpc/NettyRpcClient.scala b/server/src/main/scala/cn/pandadb/server/rpc/NettyRpcClient.scala new file mode 100644 index 00000000..2fed8558 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/rpc/NettyRpcClient.scala @@ -0,0 +1,48 @@ +package cn.pandadb.server.rpc + +import cn.pandadb.network.NodeAddress +import cn.pandadb.server.DataLogDetail +import cn.pandadb.server.internode.{GetLogDetailsRequest, GetLogDetailsResponse} +import cn.pandadb.util.Logging +import net.neoremind.kraps.RpcConf +import net.neoremind.kraps.rpc.{RpcAddress, RpcEnv, RpcEnvClientConfig} +import net.neoremind.kraps.rpc.netty.NettyRpcEnvFactory + +import scala.concurrent.duration.Duration +import scala.concurrent.Await + +object PNodeRpcClient { + + // if can't connect, wait for it + def connect(remoteAddress: NodeAddress): PNodeRpcClient = { + try { + new PNodeRpcClient(remoteAddress) + } + catch { + case e: Exception => + Thread.sleep(2000) + connect(remoteAddress) + } + } +} + +case class PNodeRpcClient(val remoteAddress: NodeAddress) extends Logging { + + val rpcEnv: RpcEnv = { + val rpcConf = new RpcConf() + val config = RpcEnvClientConfig(rpcConf, "PNodeRpc-client") + NettyRpcEnvFactory.create(config) + } + + val endPointRef = rpcEnv.setupEndpointRef(RpcAddress(remoteAddress.host, remoteAddress.port), "PNodeRpc-service") + + def close(): Unit = { + rpcEnv.stop(endPointRef) + } + + def getRemoteLogs(sinceVersion: Int): Iterable[DataLogDetail] = { + val response: GetLogDetailsResponse = Await.result(endPointRef.ask[GetLogDetailsResponse](GetLogDetailsRequest(sinceVersion)), Duration.Inf) + response.logs + } + +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/rpc/NettyRpcServer.scala b/server/src/main/scala/cn/pandadb/server/rpc/NettyRpcServer.scala new file mode 100644 index 00000000..4b38cfee --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/rpc/NettyRpcServer.scala @@ -0,0 +1,53 @@ +package cn.pandadb.server.rpc + +import cn.pandadb.network.internal.message.{InternalRpcRequest, InternalRpcResponse} +import cn.pandadb.util.Logging + +import net.neoremind.kraps.RpcConf +import net.neoremind.kraps.rpc.netty.NettyRpcEnvFactory +import net.neoremind.kraps.rpc.{RpcCallContext, RpcEndpoint, RpcEnv, RpcEnvServerConfig} + +import scala.collection.mutable.ArrayBuffer + +/** + * Created by bluejoe on 2019/11/25. 
+ */ +class NettyRpcServer(host: String, port: Int, serverName: String) extends Logging { + val config = RpcEnvServerConfig(new RpcConf(), serverName, host, port) + val thisRpcEnv = NettyRpcEnvFactory.create(config) + val handlers = ArrayBuffer[PartialFunction[InternalRpcRequest, InternalRpcResponse]](); + + val endpoint: RpcEndpoint = new RpcEndpoint() { + override val rpcEnv: RpcEnv = thisRpcEnv; + + override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { + case request: InternalRpcRequest => + val response = handlers.find { + _.isDefinedAt(request) + }.map(_.apply(request)).get + context.reply(response) + } + } + + def accept(handler: PartialFunction[InternalRpcRequest, InternalRpcResponse]): Unit = { + handlers += handler; + } + + def accept(handler: RequestHandler): Unit = { + handlers += handler.logic; + } + + def start(onStarted: => Unit = {}) { + thisRpcEnv.setupEndpoint(serverName, endpoint) + onStarted; + thisRpcEnv.awaitTermination() + } + + def shutdown(): Unit = { + thisRpcEnv.shutdown() + } +} + +trait RequestHandler { + val logic: PartialFunction[InternalRpcRequest, InternalRpcResponse]; +} \ No newline at end of file diff --git a/server/src/main/scala/cn/pandadb/server/watchdog/forward.scala b/server/src/main/scala/cn/pandadb/server/watchdog/forward.scala new file mode 100644 index 00000000..823e84f4 --- /dev/null +++ b/server/src/main/scala/cn/pandadb/server/watchdog/forward.scala @@ -0,0 +1,205 @@ +package cn.pandadb.server.watchdog + +import java.time.Duration +import java.{lang, util} + +import cn.pandadb.server.MainServerContext +import org.neo4j.bolt.runtime.BoltResult.Visitor +import org.neo4j.bolt.runtime.{BoltResult, StatementMetadata, StatementProcessor, TransactionStateMachineSPI} +import org.neo4j.bolt.v1.runtime.bookmarking.Bookmark +import org.neo4j.cypher.result.QueryResult +import org.neo4j.driver._ +import org.neo4j.driver.internal.util.Clock +import org.neo4j.driver.internal.value._ +import org.neo4j.function.{ThrowingBiConsumer, ThrowingConsumer} +import org.neo4j.graphdb.{Direction, GraphDatabaseService, Label, Node, Relationship, RelationshipType} +import org.neo4j.kernel.impl.util.ValueUtils +import org.neo4j.values.AnyValue +import org.neo4j.values.virtual.MapValue + +import scala.collection.{JavaConversions, mutable} + +/** + * Created by bluejoe on 2019/11/4. 
+ */ +object ClientPool { + lazy val session = { + val pandaString = s"panda://${MainServerContext.zkServerAddressStr}/db" + val driver = GraphDatabase.driver(pandaString, AuthTokens.basic("", "")); + driver.session(); + } +} + +class ForwardedStatementProcessor(source: StatementProcessor, spi: TransactionStateMachineSPI) extends StatementProcessor { + val session = ClientPool.session + val clock = Clock.SYSTEM; + var _currentTransaction: Transaction = null; + var _currentStatementResult: StatementResult = null; + + override def markCurrentTransactionForTermination(): Unit = source.markCurrentTransactionForTermination() + + override def commitTransaction(): Bookmark = source.commitTransaction() + + override def run(statement: String, params: MapValue): StatementMetadata = source.run(statement, params) + + override def run(statement: String, params: MapValue, bookmark: Bookmark, + txTimeout: Duration, txMetaData: util.Map[String, AnyRef]): StatementMetadata = { + + // param transformation, contribute by codeBabyLin + val paramMap = new mutable.HashMap[String, AnyRef]() + val myConsumer = new ThrowingBiConsumer[String, AnyValue, Exception]() { + override def accept(var1: String, var2: AnyValue): Unit = { + val key = var1 + val value = ValueUtils.asValue(var2).asObject() + paramMap.update(key, value) + } + } + params.foreach(myConsumer) + + val mapTrans = JavaConversions.mapAsJavaMap(paramMap) + //extract metadata from _currentStatementResult. + _currentTransaction = session.beginTransaction(); + _currentStatementResult = _currentTransaction.run(statement, mapTrans) + new MyStatementMetadata(_currentStatementResult) + } + + override def streamResult(resultConsumer: ThrowingConsumer[BoltResult, Exception]): Bookmark = { + resultConsumer.accept(new MyBoltResult(_currentStatementResult)); + //return bookmark + new Bookmark(spi.newestEncounteredTxId()); + } + + class MyBoltResult(result: StatementResult) extends BoltResult { + override def fieldNames(): Array[String] = JavaConversions.collectionAsScalaIterable(result.keys()).toArray + + override def accept(visitor: Visitor): Unit = { + val start = clock.millis(); + + val it = result.stream().iterator(); + while (it.hasNext) { + val record = it.next(); + visitor.visit(new MyRecord(record)); + } + + visitor.addMetadata("result_consumed_after", org.neo4j.values.storable.Values.longValue(clock.millis() - start)); + //query_type? 
+ } + + override def close(): Unit = _currentTransaction.close() + } + + class MyRecord(record: Record) extends QueryResult.Record { + override def fields(): Array[AnyValue] = { + JavaConversions.collectionAsScalaIterable(record.values()).map { + value: Value => + value match { + //TODO: check different types of XxxValue, unpack and use ValueUtils to transform + case v: NodeValue => ValueUtils.asAnyValue(new MyDriverNodeToDbNode(v)) + case v: org.neo4j.driver.internal.value.MapValue => ValueUtils.asAnyValue(v.asMap()) + case v: ListValue => ValueUtils.asAnyValue(v.asList()) + case v: IntegerValue => ValueUtils.asAnyValue(v.asLong()) + case v: FloatValue => ValueUtils.asAnyValue(v.asDouble()) + case v: BooleanValue => ValueUtils.asAnyValue(v.asBoolean()) + case v: DateValue => ValueUtils.asAnyValue(v.asLocalDate()) + case v: DateTimeValue => ValueUtils.asAnyValue(v.asLocalDateTime()) + case v: StringValue => ValueUtils.asAnyValue(v.asString()) + case _ => + ValueUtils.asAnyValue(value.asObject()) + } + }.toArray + } + } + + class MyStatementMetadata(result: StatementResult) extends StatementMetadata { + override def fieldNames(): Array[String] = JavaConversions.collectionAsScalaIterable(result.keys()).toArray + } + + override def hasOpenStatement: Boolean = source.hasOpenStatement + + override def rollbackTransaction(): Unit = source.rollbackTransaction() + + override def hasTransaction: Boolean = source.hasTransaction + + override def reset(): Unit = source.reset() + + override def validateTransaction(): Unit = source.validateTransaction() + + override def beginTransaction(bookmark: Bookmark): Unit = source.beginTransaction(bookmark) + + override def beginTransaction(bookmark: Bookmark, txTimeout: Duration, + txMetadata: util.Map[String, AnyRef]): Unit = + source.beginTransaction(bookmark, txTimeout, txMetadata) + + // class for driver node type transform to DB's node type + class MyDriverNodeToDbNode(driverNode: NodeValue) extends Node { + + override def getId: Long = driverNode.asEntity().id() + + override def delete(): Unit = {} + + override def getRelationships: lang.Iterable[Relationship] = JavaConversions.asJavaIterable(None) + + override def hasRelationship: Boolean = false + + override def getRelationships(types: RelationshipType*): lang.Iterable[Relationship] = JavaConversions.asJavaIterable(None) + + override def getRelationships(direction: Direction, types: RelationshipType*): lang.Iterable[Relationship] = JavaConversions.asJavaIterable(None) + + override def hasRelationship(types: RelationshipType*): Boolean = false + + override def hasRelationship(direction: Direction, types: RelationshipType*): Boolean = false + + override def getRelationships(dir: Direction): lang.Iterable[Relationship] = JavaConversions.asJavaIterable(None) + + override def hasRelationship(dir: Direction): Boolean = false + + override def getRelationships(`type`: RelationshipType, dir: Direction): lang.Iterable[Relationship] = JavaConversions.asJavaIterable(None) + + override def hasRelationship(`type`: RelationshipType, dir: Direction): Boolean = false + + override def getSingleRelationship(`type`: RelationshipType, dir: Direction): Relationship = null + + override def createRelationshipTo(otherNode: Node, `type`: RelationshipType): Relationship = null + + override def getRelationshipTypes: lang.Iterable[RelationshipType] = JavaConversions.asJavaIterable(None) + + override def getDegree: Int = 0 + + override def getDegree(`type`: RelationshipType): Int = 0 + + override def getDegree(direction: Direction): 
Int = 0 + + override def getDegree(`type`: RelationshipType, direction: Direction): Int = 0 + + override def addLabel(label: Label): Unit = {} + + override def removeLabel(label: Label): Unit = {} + + override def hasLabel(label: Label): Boolean = true + + override def getLabels: lang.Iterable[Label] = { + val itor = JavaConversions.asScalaIterator(driverNode.asNode().labels().iterator()) + val iter = itor.map(label => Label.label(label)) + JavaConversions.asJavaIterable(iter.toIterable) + } + + override def getGraphDatabase: GraphDatabaseService = null + + override def hasProperty(key: String): Boolean = driverNode.get(key) != null + + override def getProperty(key: String): AnyRef = driverNode.get(key).asObject() + + override def getProperty(key: String, defaultValue: Any): AnyRef = driverNode.get(key, defaultValue) + + override def setProperty(key: String, value: Any): Unit = {} + + override def removeProperty(key: String): AnyRef = null + + override def getPropertyKeys: lang.Iterable[String] = JavaConversions.asJavaIterable(None) + + override def getProperties(keys: String*): util.Map[String, AnyRef] = JavaConversions.mapAsJavaMap(Map()) + + override def getAllProperties: util.Map[String, AnyRef] = driverNode.asEntity().asMap() + } + +} \ No newline at end of file diff --git a/server/src/test/resources/test_pnode0.conf b/server/src/test/resources/test_pnode0.conf new file mode 100644 index 00000000..0ae98a18 --- /dev/null +++ b/server/src/test/resources/test_pnode0.conf @@ -0,0 +1,37 @@ +dbms.security.auth_enabled=false +dbms.connector.bolt.enabled=true +dbms.connector.bolt.tls_level=OPTIONAL +dbms.connector.bolt.listen_address=0.0.0.0:7685 + +dbms.connector.http.enabled=true +dbms.connector.http.listen_address=localhost:7469 +dbms.connector.https.enabled=false +dbms.logs.http.enabled=true + +blob.plugins.conf=./cypher-plugins.xml + +#blob.storage=cn.pidb.engine.HBaseBlobValueStorage +blob.storage.hbase.zookeeper.port=2181 +blob.storage.hbase.zookeeper.quorum=localhost +blob.storage.hbase.auto_create_table=true +blob.storage.hbase.table=PIDB_BLOB + +blob.aipm.modules.enabled=false +blob.aipm.modules.dir=/usr/local/aipm/modules/ + +#blob.storage=cn.pidb.engine.FileBlobValueStorage +#blob.storage.file.dir=/tmp + +#dbms.active_database=testdb +aipm.http.host.url=http://10.0.86.128:8081/ + +# fake address just for test, supposed to be the real ip:port of localhost:boltPort +#serviceAddress=10.0.88.11:1111 +# zookeeper revelant args +zkServerAddress=10.0.86.26:2181,10.0.86.27:2181,10.0.86.70:2181 +sessionTimeout=20000 +connectionTimeout=10000 +registryPath=/pandaNodes +ordinaryNodesPath=/pandaNodes/ordinaryNodes +leaderNodePath=/pandaNodes/leaderNode +localNodeAddress=10.0.88.11:1111 \ No newline at end of file diff --git a/server/src/test/scala/ClusterLogTest.scala b/server/src/test/scala/ClusterLogTest.scala new file mode 100644 index 00000000..8fbed081 --- /dev/null +++ b/server/src/test/scala/ClusterLogTest.scala @@ -0,0 +1,61 @@ +import java.io.{BufferedReader, File, FileInputStream, InputStreamReader} + +import cn.pandadb.server.{DataLogDetail, JsonDataLogRW} +import org.junit.{Assert, BeforeClass, Test} +import ClusterLogTest.logFile +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 19:21 2019/12/1 + * @Modified By: + */ +object ClusterLogTest { + + val testdataPath: String = "./src/test/testdata/"; + val logFilePath: String = "./src/test/testdata/datalog.json" + val logFile = new File(logFilePath) + @BeforeClass + def prepareLogFile(): Unit = { + if 
(logFile.exists()) { + logFile.delete() + logFile.createNewFile() + } else { + new File(testdataPath).mkdirs() + logFile.createNewFile() + } + } +} + +class ClusterLogTest { + + val expectedLogArray1: Array[String] = Array("Match(n2), return n2;") + val expectedLogArray2: Array[String] = Array("Match(n2), return n2;", "Match(n3), return n3;") + + @Test + def test1(): Unit = { + val jsonDataLogRW = JsonDataLogRW.open(logFile) + Assert.assertEquals(0, logFile.length()) + jsonDataLogRW.write(DataLogDetail(1, "Match(n1), return n1;")) + jsonDataLogRW.write(DataLogDetail(2, "Match(n2), return n2;")) + val _bf = new BufferedReader(new InputStreamReader(new FileInputStream(logFile))) + Assert.assertEquals(s"""{"versionNum":${1},"command":"Match(n1), return n1;"}""", _bf.readLine()) + Assert.assertEquals(s"""{"versionNum":${2},"command":"Match(n2), return n2;"}""", _bf.readLine()) + } + + @Test + def test2(): Unit = { + val jsonDataLogRW = JsonDataLogRW.open(logFile) + val commandList = jsonDataLogRW.consume(logItem => logItem.command, 1) + Assert.assertEquals(expectedLogArray1.head, commandList.toList.head) + } + + @Test + def test3(): Unit = { + val jsonDataLog = JsonDataLogRW.open(logFile) + jsonDataLog.write(DataLogDetail(3, "Match(n3), return n3;")) + val commandList = jsonDataLog.consume(logItem => logItem.command, 1) + val commandArr = commandList.toArray + Assert.assertEquals(expectedLogArray2(0), commandArr(0)) + Assert.assertEquals(expectedLogArray2(1), commandArr(1)) + } +} \ No newline at end of file diff --git a/server/src/test/scala/NaiveLockTest.scala b/server/src/test/scala/NaiveLockTest.scala new file mode 100644 index 00000000..8ddb22f9 --- /dev/null +++ b/server/src/test/scala/NaiveLockTest.scala @@ -0,0 +1,79 @@ +import NaiveLockTest._ +import cn.pandadb.network.{NodeAddress, ZookeeperBasedClusterClient} +import cn.pandadb.server.{MasterRole, ZKServiceRegistry} +import org.junit.runners.MethodSorters +import org.junit.{Assert, FixMethodOrder, Test} +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 9:06 2019/11/28 + * @Modified By: + */ + +object NaiveLockTest { + val zkString = "10.0.86.26:2181" + val localNodeAddress = "10.0.88.11:1111" + +// val localPNodeServer = new LocalServerThread(0) + val nodeList = List("10.0.88.11:1111", "10.0.88.22:2222", "10.0.88.33:3333", "10.0.88.44:4444") + val clusterClient = new ZookeeperBasedClusterClient(zkString) + val master = { + val _register = new ZKServiceRegistry(zkString) + _register.registerAsLeader(NodeAddress.fromString("10.0.88.11:1111")) + val mR = new MasterRole(clusterClient, NodeAddress.fromString(localNodeAddress)) + _register.unRegisterLeaderNode(NodeAddress.fromString("10.0.88.11:1111")) + _register.unRegisterOrdinaryNode(NodeAddress.fromString("10.0.88.11:1111")) + mR + } + val register = new ZKServiceRegistry(zkString) +} + +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +class NaiveLockTest { + + //register nodes + @Test + def test1(): Unit = { + + Assert.assertEquals(true, clusterClient.getAllNodes().isEmpty) + Assert.assertEquals(true, clusterClient.getWriteMasterNode("").isEmpty) + + nodeList.foreach(nodeStr => register.registerAsOrdinaryNode(NodeAddress.fromString(nodeStr))) + register.registerAsLeader(NodeAddress.fromString(nodeList.head)) + + Assert.assertEquals(nodeList.head, clusterClient.getWriteMasterNode("").get.getAsString) + val a = clusterClient.getAllNodes().map(_.getAsString).toList + Assert.assertEquals(true, compareList(nodeList, clusterClient.getAllNodes())) + + } + + // test write lock 
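+  // (acquiring the write lock should make every node, including the leader, disappear
+  // from the ZooKeeper registry; releasing it should make them visible again)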
+ @Test + def test2(): Unit = { + + master.globalWriteLock.lock() + Assert.assertEquals(true, clusterClient.getAllNodes().isEmpty) + Assert.assertEquals(true, clusterClient.getWriteMasterNode("").isEmpty) + master.globalWriteLock.unlock() + Assert.assertEquals(nodeList.head, clusterClient.getWriteMasterNode("").get.getAsString) + Assert.assertEquals(true, compareList(nodeList, clusterClient.getAllNodes())) + } + + // test read lock + @Test + def test3(): Unit = { + master.globalReadLock.lock() + Thread.sleep(3000) + Assert.assertEquals(true, compareList(nodeList, clusterClient.getAllNodes())) + Assert.assertEquals(false, clusterClient.getWriteMasterNode("").getOrElse(false)) + master.globalReadLock.unlock() + Thread.sleep(3000) + Assert.assertEquals(nodeList.head, clusterClient.getWriteMasterNode("").get.getAsString) + Assert.assertEquals(true, compareList(nodeList, clusterClient.getAllNodes())) + } + + def compareList(srtList: List[String], allNodes: Iterable[NodeAddress]): Boolean = { + allNodes.map(_.getAsString).toSet.equals(srtList.toSet) + } + +} diff --git a/server/src/test/scala/RpcServerTest.scala b/server/src/test/scala/RpcServerTest.scala new file mode 100644 index 00000000..fe340751 --- /dev/null +++ b/server/src/test/scala/RpcServerTest.scala @@ -0,0 +1,62 @@ +import cn.pandadb.network.internal.message.{InternalRpcRequest, InternalRpcResponse} +import cn.pandadb.server.rpc.{NettyRpcServer, RequestHandler} +import net.neoremind.kraps.RpcConf +import net.neoremind.kraps.rpc.netty.NettyRpcEnvFactory +import net.neoremind.kraps.rpc.{RpcAddress, RpcEnv, RpcEnvClientConfig} +import org.junit.Test + +import scala.concurrent.Await +import scala.concurrent.duration.Duration + +/** + * Created by bluejoe on 2020/1/7. + */ +class RpcServerTest { + @Test + def test1(): Unit = { + val client = new ExampleClient("localhost", 1234); + //scalastyle:off println + println(s"square(101)=${client.square(101)}") + } +} + +object StartExampleRpcServer { + def main(args: Array[String]) { + val serverKernel = new NettyRpcServer("0.0.0.0", 1234, "ExampleRPC"); + serverKernel.accept(ExampleRequestHandler()); + serverKernel.start({ + //scalastyle:off println + println(s"rpc server started!"); + }) + } +} + +class ExampleClient(host: String, port: Int) { + val rpcConf = new RpcConf() + val config = RpcEnvClientConfig(rpcConf, "ExampleClient") + val rpcEnv: RpcEnv = NettyRpcEnvFactory.create(config) + + val endPointRef = rpcEnv.setupEndpointRef(RpcAddress(host, port), "ExampleClient") + + def close(): Unit = { + rpcEnv.stop(endPointRef) + } + + def square(x: Double): Double = + Await.result(endPointRef.ask[CalcSquareResponse](CalcSquareRequest(x)), Duration.Inf).y; +} + +case class ExampleRequestHandler() extends RequestHandler { + override val logic: PartialFunction[InternalRpcRequest, InternalRpcResponse] = { + case CalcSquareRequest(x: Double) => + CalcSquareResponse(x * x) + } +} + +case class CalcSquareRequest(x: Double) extends InternalRpcRequest { + +} + +case class CalcSquareResponse(y: Double) extends InternalRpcResponse { + +} \ No newline at end of file diff --git a/server/src/test/scala/ZKResistryTest.scala b/server/src/test/scala/ZKResistryTest.scala new file mode 100644 index 00000000..14758ef9 --- /dev/null +++ b/server/src/test/scala/ZKResistryTest.scala @@ -0,0 +1,78 @@ +import cn.pandadb.network.{NodeAddress, ZKPathConfig} +import cn.pandadb.server.ZKServiceRegistry +import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import 
org.apache.curator.retry.ExponentialBackoffRetry
+import org.junit.runners.MethodSorters
+import org.junit.{Assert, FixMethodOrder, Test}
+
+/**
+ * @Author: Airzihao
+ * @Description:
+ * @Date: Created in 14:25 2019/11/26
+ * @Modified By:
+ */
+
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+class ZKResistryTest {
+
+  val localNodeAddress = "10.0.88.11:1111"
+  val zkServerAddress = "10.0.86.26:2181"
+
+  val ordinaryNodePath = ZKPathConfig.ordinaryNodesPath + s"/" + localNodeAddress
+  val leaderNodePath = ZKPathConfig.leaderNodePath + s"/" + localNodeAddress
+
+  val curator: CuratorFramework = CuratorFrameworkFactory.newClient(zkServerAddress,
+    new ExponentialBackoffRetry(1000, 3));
+  curator.start()
+  val ordinaryNodeRegistry = new ZKServiceRegistry(zkServerAddress)
+  val leaderNodeRegistry = new ZKServiceRegistry(zkServerAddress)
+
+  // no ordinaryNode exists before registration.
+  @Test
+  def test1(): Unit = {
+    val flag = curator.checkExists().forPath(ordinaryNodePath)
+    Assert.assertEquals(true, flag == null)
+  }
+
+  // the ordinaryNode exists after registration
+  @Test
+  def test2(): Unit = {
+    ordinaryNodeRegistry.registerAsOrdinaryNode(NodeAddress.fromString(localNodeAddress))
+    val flag = curator.checkExists().forPath(ordinaryNodePath)
+    Assert.assertEquals(true, flag != null)
+    val ordinaryNodeAddress = curator.getChildren().forPath(ZKPathConfig.ordinaryNodesPath) // returned type is ArrayList[String]
+    Assert.assertEquals("10.0.88.11:1111", ordinaryNodeAddress.get(0))
+    ordinaryNodeRegistry.curator.close()
+  }
+
+  // the ordinaryNode is deleted after its curator session is closed
+  @Test
+  def test3(): Unit = {
+    val flag = curator.checkExists().forPath(ordinaryNodePath)
+    Assert.assertEquals(true, flag == null)
+  }
+
+  // no leader node exists before registering as leader node
+  @Test
+  def test4(): Unit = {
+    val flag = curator.checkExists().forPath(leaderNodePath)
+    Assert.assertEquals(true, flag == null)
+  }
+
+  // the leader node exists after registration
+  @Test
+  def test5(): Unit = {
+    leaderNodeRegistry.registerAsLeader(NodeAddress.fromString(localNodeAddress))
+    val flag = curator.checkExists().forPath(leaderNodePath)
+    Assert.assertEquals(true, flag != null)
+    leaderNodeRegistry.curator.close()
+  }
+
+  // the leader node is deleted after its curator session is closed.
+  @Test
+  def test6(): Unit = {
+    val flag = curator.checkExists().forPath(leaderNodePath)
+    Assert.assertEquals(true, flag == null)
+  }
+
+}
diff --git a/src/blob/java/org/neo4j/bolt/runtime/BoltStateMachineFactoryImpl.java b/src/blob/java/org/neo4j/bolt/runtime/BoltStateMachineFactoryImpl.java
deleted file mode 100644
index 16c9f31e..00000000
--- a/src/blob/java/org/neo4j/bolt/runtime/BoltStateMachineFactoryImpl.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (c) 2002-2019 "Neo4j,"
- * Neo4j Sweden AB [http://neo4j.com]
- *
- * This file is part of Neo4j.
- *
- * Neo4j is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- */ -package org.neo4j.bolt.runtime; - -import java.time.Clock; -import java.time.Duration; - -import org.neo4j.bolt.BoltChannel; -import org.neo4j.bolt.security.auth.Authentication; -import org.neo4j.bolt.v1.BoltProtocolV1; -import org.neo4j.bolt.v1.runtime.BoltStateMachineV1; -import org.neo4j.bolt.v1.runtime.BoltStateMachineV1SPI; -import org.neo4j.bolt.v1.runtime.TransactionStateMachineV1SPI; -import org.neo4j.bolt.v2.BoltProtocolV2; -import org.neo4j.bolt.v3.BoltProtocolV3; -import org.neo4j.bolt.v3.BoltStateMachineV3; -import org.neo4j.bolt.v3.runtime.TransactionStateMachineV3SPI; -import org.neo4j.bolt.v5.BoltProtocolV5; -import org.neo4j.bolt.v5.BoltStateMachineV5; -import org.neo4j.bolt.v5.runtime.TransactionStateMachineV5SPI; -import org.neo4j.dbms.database.DatabaseManager; -import org.neo4j.graphdb.factory.GraphDatabaseSettings; -import org.neo4j.kernel.configuration.Config; -import org.neo4j.kernel.impl.factory.GraphDatabaseFacade; -import org.neo4j.logging.internal.LogService; -import org.neo4j.udc.UsageData; - -public class BoltStateMachineFactoryImpl implements BoltStateMachineFactory -{ - private final DatabaseManager databaseManager; - private final UsageData usageData; - private final LogService logging; - private final Authentication authentication; - private final Config config; - private final Clock clock; - private final String activeDatabaseName; - - public BoltStateMachineFactoryImpl( DatabaseManager databaseManager, UsageData usageData, - Authentication authentication, Clock clock, Config config, LogService logging ) - { - this.databaseManager = databaseManager; - this.usageData = usageData; - this.logging = logging; - this.authentication = authentication; - this.config = config; - this.clock = clock; - this.activeDatabaseName = config.get( GraphDatabaseSettings.active_database ); - } - - @Override - public BoltStateMachine newStateMachine( long protocolVersion, BoltChannel boltChannel ) - { - if ( protocolVersion == BoltProtocolV1.VERSION || protocolVersion == BoltProtocolV2.VERSION ) - { - return newStateMachineV1( boltChannel ); - } - else if ( protocolVersion == BoltProtocolV3.VERSION ) - { - return newStateMachineV3( boltChannel ); - } - else if ( protocolVersion == BoltProtocolV5.VERSION ) - { - return newStateMachineV5( boltChannel ); - } - else - { - throw new IllegalArgumentException( "Failed to create a state machine for protocol version " + protocolVersion ); - } - } - - private BoltStateMachine newStateMachineV1( BoltChannel boltChannel ) - { - TransactionStateMachineSPI transactionSPI = new TransactionStateMachineV1SPI( getActiveDatabase(), boltChannel, getAwaitDuration(), clock ); - BoltStateMachineSPI boltSPI = new BoltStateMachineV1SPI( usageData, logging, authentication, transactionSPI ); - return new BoltStateMachineV1( boltSPI, boltChannel, clock ); - } - - private BoltStateMachine newStateMachineV3( BoltChannel boltChannel ) - { - TransactionStateMachineSPI transactionSPI = new TransactionStateMachineV3SPI( getActiveDatabase(), boltChannel, getAwaitDuration(), clock ); - BoltStateMachineSPI boltSPI = new BoltStateMachineV1SPI( usageData, logging, authentication, transactionSPI ); - return new BoltStateMachineV3( boltSPI, boltChannel, clock ); - } - - private BoltStateMachine newStateMachineV5( BoltChannel boltChannel ) - { - TransactionStateMachineSPI transactionSPI = new TransactionStateMachineV5SPI( getActiveDatabase(), boltChannel, getAwaitDuration(), clock ); - BoltStateMachineSPI boltSPI = new BoltStateMachineV1SPI( usageData, logging, 
authentication, transactionSPI ); - return new BoltStateMachineV5( boltSPI, boltChannel, clock ); - } - - private Duration getAwaitDuration() - { - long bookmarkReadyTimeout = config.get( GraphDatabaseSettings.bookmark_ready_timeout ).toMillis(); - - return Duration.ofMillis( bookmarkReadyTimeout ); - } - - private GraphDatabaseFacade getActiveDatabase() - { - return databaseManager.getDatabaseFacade( activeDatabaseName ).get(); - } -} diff --git a/src/blob/java/org/neo4j/bolt/transport/DefaultBoltProtocolFactory.java b/src/blob/java/org/neo4j/bolt/transport/DefaultBoltProtocolFactory.java deleted file mode 100644 index 81f9d8c8..00000000 --- a/src/blob/java/org/neo4j/bolt/transport/DefaultBoltProtocolFactory.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.bolt.transport; - -import org.neo4j.bolt.BoltChannel; -import org.neo4j.bolt.BoltProtocol; -import org.neo4j.bolt.runtime.BoltConnectionFactory; -import org.neo4j.bolt.runtime.BoltStateMachineFactory; -import org.neo4j.bolt.v1.BoltProtocolV1; -import org.neo4j.bolt.v2.BoltProtocolV2; -import org.neo4j.bolt.v3.BoltProtocolV3; -import org.neo4j.bolt.v5.BoltProtocolV5; -import org.neo4j.logging.internal.LogService; - -public class DefaultBoltProtocolFactory implements BoltProtocolFactory -{ - private final BoltConnectionFactory connectionFactory; - private final LogService logService; - private final BoltStateMachineFactory stateMachineFactory; - - public DefaultBoltProtocolFactory( BoltConnectionFactory connectionFactory, BoltStateMachineFactory stateMachineFactory, - LogService logService ) - { - this.connectionFactory = connectionFactory; - this.stateMachineFactory = stateMachineFactory; - this.logService = logService; - } - - @Override - public BoltProtocol create( long protocolVersion, BoltChannel channel ) - { - if ( protocolVersion == BoltProtocolV1.VERSION ) - { - return new BoltProtocolV1( channel, connectionFactory, stateMachineFactory, logService ); - } - else if ( protocolVersion == BoltProtocolV2.VERSION ) - { - return new BoltProtocolV2( channel, connectionFactory, stateMachineFactory, logService ); - } - else if ( protocolVersion == BoltProtocolV3.VERSION ) - { - return new BoltProtocolV3( channel, connectionFactory, stateMachineFactory, logService ); - } - else if ( protocolVersion == BoltProtocolV5.VERSION ) - { - return new BoltProtocolV5( channel, connectionFactory, stateMachineFactory, logService ); - } - else - { - return null; - } - } -} diff --git a/src/blob/java/org/neo4j/bolt/v5/BoltProtocolV5.java b/src/blob/java/org/neo4j/bolt/v5/BoltProtocolV5.java deleted file mode 100644 index b6a5a9c4..00000000 --- a/src/blob/java/org/neo4j/bolt/v5/BoltProtocolV5.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This 
file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.bolt.v5; - -import org.neo4j.bolt.BoltChannel; -import org.neo4j.bolt.messaging.BoltRequestMessageReader; -import org.neo4j.bolt.messaging.Neo4jPack; -import org.neo4j.bolt.runtime.BoltConnection; -import org.neo4j.bolt.runtime.BoltConnectionFactory; -import org.neo4j.bolt.runtime.BoltStateMachineFactory; -import org.neo4j.bolt.v1.messaging.BoltResponseMessageWriterV1; -import org.neo4j.bolt.v3.BoltProtocolV3; -import org.neo4j.bolt.v5.request.BoltRequestMessageReaderV5; -import org.neo4j.bolt.v5.request.messaging.Neo4jPackV5; -import org.neo4j.logging.internal.LogService; - -/** - * Bolt protocol V3. It hosts all the components that are specific to BoltV3 - */ -public class BoltProtocolV5 extends BoltProtocolV3 -{ - public static final long VERSION = 5; - - public BoltProtocolV5( BoltChannel channel, BoltConnectionFactory connectionFactory, BoltStateMachineFactory stateMachineFactory, LogService logging ) - { - super( channel, connectionFactory, stateMachineFactory, logging ); - } - - @Override - protected Neo4jPack createPack() - { - return new Neo4jPackV5(); - } - - @Override - public long version() - { - return VERSION; - } - - @Override - protected BoltRequestMessageReader createMessageReader( BoltChannel channel, Neo4jPack neo4jPack, BoltConnection connection, LogService logging ) - { - BoltResponseMessageWriterV1 responseWriter = new BoltResponseMessageWriterV1( neo4jPack, connection.output(), logging ); - return new BoltRequestMessageReaderV5( connection, responseWriter, logging ); - } -} diff --git a/src/blob/java/org/neo4j/bolt/v5/BoltStateMachineV5.java b/src/blob/java/org/neo4j/bolt/v5/BoltStateMachineV5.java deleted file mode 100644 index 7dfb7cc7..00000000 --- a/src/blob/java/org/neo4j/bolt/v5/BoltStateMachineV5.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.bolt.v5; - -import org.neo4j.bolt.BoltChannel; -import org.neo4j.bolt.runtime.BoltStateMachineSPI; -import org.neo4j.bolt.v3.BoltStateMachineV3; -import org.neo4j.bolt.v3.runtime.ConnectedState; -import org.neo4j.bolt.v3.runtime.FailedState; -import org.neo4j.bolt.v3.runtime.InterruptedState; -import org.neo4j.bolt.v3.runtime.ReadyState; -import org.neo4j.bolt.v3.runtime.StreamingState; -import org.neo4j.bolt.v3.runtime.TransactionReadyState; -import org.neo4j.bolt.v3.runtime.TransactionStreamingState; -import org.neo4j.bolt.v5.runtime.TransactionReadyStateV5; - -import java.time.Clock; - -public class BoltStateMachineV5 extends BoltStateMachineV3 -{ - public BoltStateMachineV5( BoltStateMachineSPI boltSPI, BoltChannel boltChannel, Clock clock ) - { - super( boltSPI, boltChannel, clock ); - } - - @Override - protected States buildStates() - { - ConnectedState connected = new ConnectedState(); - ReadyState ready = new ReadyState(); - StreamingState streaming = new StreamingState(); - - //supoorts blob: TransactionReadyStateV5 - TransactionReadyState txReady = new TransactionReadyStateV5(); - TransactionStreamingState txStreaming = new TransactionStreamingState(); - FailedState failed = new FailedState(); - InterruptedState interrupted = new InterruptedState(); - - connected.setReadyState( ready ); - - ready.setTransactionReadyState( txReady ); - ready.setStreamingState( streaming ); - ready.setFailedState( failed ); - ready.setInterruptedState( interrupted ); - - streaming.setReadyState( ready ); - streaming.setFailedState( failed ); - streaming.setInterruptedState( interrupted ); - - txReady.setReadyState( ready ); - txReady.setTransactionStreamingState( txStreaming ); - txReady.setFailedState( failed ); - txReady.setInterruptedState( interrupted ); - - txStreaming.setReadyState( txReady ); - txStreaming.setFailedState( failed ); - txStreaming.setInterruptedState( interrupted ); - - failed.setInterruptedState( interrupted ); - - interrupted.setReadyState( ready ); - - return new States( connected, failed ); - } - -} diff --git a/src/blob/java/org/neo4j/bolt/v5/request/messaging/Neo4jPackV5.java b/src/blob/java/org/neo4j/bolt/v5/request/messaging/Neo4jPackV5.java deleted file mode 100644 index 99e4a4b9..00000000 --- a/src/blob/java/org/neo4j/bolt/v5/request/messaging/Neo4jPackV5.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.bolt.v5.request.messaging; - -import cn.graiph.blob.Blob; -import org.neo4j.bolt.blob.BoltServerBlobIO; -import org.neo4j.bolt.v1.packstream.PackInput; -import org.neo4j.bolt.v1.packstream.PackOutput; -import org.neo4j.bolt.v2.messaging.Neo4jPackV2; -import org.neo4j.values.AnyValue; - -import java.io.IOException; - -public class Neo4jPackV5 extends Neo4jPackV2 -{ - public static final long VERSION = 5; - - @Override - public Packer newPacker( PackOutput output ) - { - return new PackerV5( output ); - } - - @Override - public Unpacker newUnpacker( PackInput input ) - { - return new UnpackerV5( input ); - } - - @Override - public long version() - { - return VERSION; - } - - private static class PackerV5 extends Neo4jPackV2.PackerV2 - { - PackerV5( PackOutput output ) - { - super( output ); - } - - @Override - public void writeBlob( Blob blob ) throws IOException - { - BoltServerBlobIO.packBlob( blob, out ); - } - } - - private static class UnpackerV5 extends Neo4jPackV2.UnpackerV2 - { - UnpackerV5( PackInput input ) - { - super( input ); - } - - @Override - public AnyValue unpack() throws IOException - { - AnyValue blobValue = BoltServerBlobIO.unpackBlob( this ); - if ( blobValue != null ) - { - return blobValue; - } - return super.unpack(); - } - } -} diff --git a/src/blob/java/org/neo4j/bolt/v5/runtime/TransactionReadyStateV5.java b/src/blob/java/org/neo4j/bolt/v5/runtime/TransactionReadyStateV5.java deleted file mode 100644 index 13d8907e..00000000 --- a/src/blob/java/org/neo4j/bolt/v5/runtime/TransactionReadyStateV5.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.bolt.v5.runtime; - -import org.neo4j.bolt.blob.GetBlobMessage; -import org.neo4j.bolt.messaging.RequestMessage; -import org.neo4j.bolt.runtime.BoltStateMachineState; -import org.neo4j.bolt.runtime.StateMachineContext; -import org.neo4j.bolt.v3.runtime.TransactionReadyState; - -public class TransactionReadyStateV5 extends TransactionReadyState -{ - @Override - public BoltStateMachineState processUnsafe( RequestMessage message, StateMachineContext context ) throws Exception - { - //NOTE: get blob? - if ( message instanceof GetBlobMessage ) - { - ((GetBlobMessage) message).accepts( context ); - return this; - } - - return super.processUnsafe( message, context ); - } -} diff --git a/src/blob/java/org/neo4j/consistency/checking/LabelChainWalker.java b/src/blob/java/org/neo4j/consistency/checking/LabelChainWalker.java deleted file mode 100644 index 0c57ad63..00000000 --- a/src/blob/java/org/neo4j/consistency/checking/LabelChainWalker.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.consistency.checking; - -import org.eclipse.collections.api.map.primitive.MutableLongObjectMap; -import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap; - -import java.util.ArrayList; -import java.util.List; - -import cn.graiph.util.ContextMap; -import org.neo4j.consistency.report.ConsistencyReport; -import org.neo4j.consistency.store.RecordAccess; -import org.neo4j.kernel.impl.InstanceContext; -import org.neo4j.kernel.impl.store.LabelIdArray; -import org.neo4j.kernel.impl.store.PropertyType; -import org.neo4j.kernel.impl.store.record.AbstractBaseRecord; -import org.neo4j.kernel.impl.store.record.DynamicRecord; -import org.neo4j.kernel.impl.store.record.Record; - -import static org.neo4j.kernel.impl.store.AbstractDynamicStore.readFullByteArrayFromHeavyRecords; -import static org.neo4j.kernel.impl.store.DynamicArrayStore.getRightArray; - -public class LabelChainWalker implements - ComparativeRecordChecker -{ - private final Validator validator; - - private final MutableLongObjectMap recordIds = new LongObjectHashMap<>(); - private final List recordList = new ArrayList<>(); - private boolean allInUse = true; - - public LabelChainWalker( Validator validator ) - { - this.validator = validator; - } - - @Override - public void checkReference( RECORD record, DynamicRecord dynamicRecord, - CheckerEngine engine, - RecordAccess records ) - { - recordIds.put( dynamicRecord.getId(), dynamicRecord ); - - if ( dynamicRecord.inUse() ) - { - recordList.add( dynamicRecord ); - } - else - { - allInUse = false; - validator.onRecordNotInUse( dynamicRecord, engine ); - } - - long nextBlock = dynamicRecord.getNextBlock(); - if ( Record.NO_NEXT_BLOCK.is( nextBlock ) ) - { - if ( allInUse ) - { - // only validate label ids if all dynamic records seen were in use - validator.onWellFormedChain( labelIds( InstanceContext.none(), recordList ), engine, records ); - } - } - else - { - final DynamicRecord nextRecord = recordIds.get( nextBlock ); - if ( nextRecord != null ) - { - validator.onRecordChainCycle( nextRecord, engine ); - } - else - { - engine.comparativeCheck( records.nodeLabels( nextBlock ), this ); - } - } - } - - public static long[] labelIds( ContextMap ic, List recordList ) - { - long[] idArray = - (long[]) getRightArray( ic, readFullByteArrayFromHeavyRecords( recordList, PropertyType.ARRAY ) ).asObject(); - return LabelIdArray.stripNodeId( idArray ); - } - - public interface Validator - { - void onRecordNotInUse( DynamicRecord dynamicRecord, CheckerEngine engine ); - void onRecordChainCycle( DynamicRecord record, CheckerEngine engine ); - void onWellFormedChain( long[] labelIds, CheckerEngine engine, RecordAccess records ); - } -} diff --git a/src/blob/java/org/neo4j/consistency/checking/full/NodeLabelReader.java b/src/blob/java/org/neo4j/consistency/checking/full/NodeLabelReader.java deleted file mode 100644 index af118f00..00000000 --- 
a/src/blob/java/org/neo4j/consistency/checking/full/NodeLabelReader.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.consistency.checking.full; - -import org.eclipse.collections.api.set.primitive.MutableLongSet; -import org.eclipse.collections.impl.set.mutable.primitive.LongHashSet; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.neo4j.collection.PrimitiveLongCollections; -import org.neo4j.consistency.checking.CheckerEngine; -import org.neo4j.consistency.checking.LabelChainWalker; -import org.neo4j.consistency.report.ConsistencyReport; -import org.neo4j.consistency.store.RecordAccess; -import org.neo4j.consistency.store.RecordReference; -import org.neo4j.kernel.impl.InstanceContext; -import org.neo4j.kernel.impl.store.DynamicNodeLabels; -import org.neo4j.kernel.impl.store.InlineNodeLabels; -import org.neo4j.kernel.impl.store.NodeLabels; -import org.neo4j.kernel.impl.store.NodeLabelsField; -import org.neo4j.kernel.impl.store.RecordStore; -import org.neo4j.kernel.impl.store.record.AbstractBaseRecord; -import org.neo4j.kernel.impl.store.record.DynamicRecord; -import org.neo4j.kernel.impl.store.record.NodeRecord; -import org.neo4j.kernel.impl.store.record.Record; - -import static org.neo4j.kernel.impl.store.record.RecordLoad.FORCE; - -public class NodeLabelReader -{ - private NodeLabelReader() - { - } - - public static Set getListOfLabels( - NodeRecord nodeRecord, RecordAccess records, CheckerEngine engine ) - { - final Set labels = new HashSet<>(); - - NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( nodeRecord ); - if ( nodeLabels instanceof DynamicNodeLabels ) - { - - DynamicNodeLabels dynamicNodeLabels = (DynamicNodeLabels) nodeLabels; - long firstRecordId = dynamicNodeLabels.getFirstDynamicRecordId(); - RecordReference firstRecordReference = records.nodeLabels( firstRecordId ); - engine.comparativeCheck( firstRecordReference, - new LabelChainWalker<>( - new LabelChainWalker.Validator() - { - @Override - public void onRecordNotInUse( DynamicRecord dynamicRecord, - CheckerEngine engine ) - { - } - - @Override - public void onRecordChainCycle( DynamicRecord record, - CheckerEngine engine ) - { - } - - @Override - public void onWellFormedChain( long[] labelIds, - CheckerEngine engine, - RecordAccess records ) - { - copyToSet( labelIds, labels ); - } - } ) ); - } - else - { - copyToSet( nodeLabels.get( null ), labels ); - } - - return labels; - } - - public static long[] getListOfLabels( NodeRecord nodeRecord, RecordStore labels ) - { - long field = nodeRecord.getLabelField(); - if ( NodeLabelsField.fieldPointsToDynamicRecordOfLabels( field ) ) - { - List recordList = new ArrayList<>(); - final MutableLongSet alreadySeen = new LongHashSet(); - long id = 
NodeLabelsField.firstDynamicLabelRecordId( field ); - while ( !Record.NULL_REFERENCE.is( id ) ) - { - DynamicRecord record = labels.getRecord( id, labels.newRecord(), FORCE ); - if ( !record.inUse() || !alreadySeen.add( id ) ) - { - return PrimitiveLongCollections.EMPTY_LONG_ARRAY; - } - recordList.add( record ); - } - return LabelChainWalker.labelIds( InstanceContext.of(labels), recordList ); - } - return InlineNodeLabels.get( nodeRecord ); - } - - public static Set getListOfLabels( long labelField ) - { - final Set labels = new HashSet<>(); - copyToSet( InlineNodeLabels.parseInlined(labelField), labels ); - - return labels; - } - - private static void copyToSet( long[] array, Set set ) - { - for ( long labelId : array ) - { - set.add( labelId ); - } - } -} diff --git a/src/blob/java/org/neo4j/graphdb/factory/GraphDatabaseBuilder.java b/src/blob/java/org/neo4j/graphdb/factory/GraphDatabaseBuilder.java deleted file mode 100644 index 0519d249..00000000 --- a/src/blob/java/org/neo4j/graphdb/factory/GraphDatabaseBuilder.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.graphdb.factory; - -import java.io.File; -import java.io.InputStream; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import javax.annotation.Nonnull; - -import org.neo4j.graphdb.GraphDatabaseService; -import org.neo4j.graphdb.config.Setting; -import org.neo4j.kernel.configuration.Config; - -import static org.neo4j.helpers.collection.MapUtil.stringMap; - -/** - * Builder for {@link GraphDatabaseService}s that allows for setting and loading - * configuration. - */ -public class GraphDatabaseBuilder -{ - /** - * @deprecated This will be moved to an internal package in the future. - */ - @Deprecated - public interface DatabaseCreator - { - /** - * @param config initial configuration for the database. - * @return an instance of {@link GraphDatabaseService}. - * @deprecated this method will go away in 4.0. See {@link #newDatabase(Config)} instead. - */ - @Deprecated - default GraphDatabaseService newDatabase( Map config ) - { - return newDatabase( Config.defaults( config ) ); - } - - /** - * @param config initial configuration for the database. - * @return an instance of {@link GraphDatabaseService}. - */ - default GraphDatabaseService newDatabase( @Nonnull Config config ) - { - return newDatabase( config.getRaw() ); - } - } - - protected DatabaseCreator creator; - protected Map config = new HashMap<>(); - - /** - * @deprecated - */ - @Deprecated - public GraphDatabaseBuilder( DatabaseCreator creator ) - { - this.creator = creator; - } - - /** - * Set a database setting to a particular value. 
- * - * @param setting Database setting to set - * @param value New value of the setting - * @return the builder - */ - public GraphDatabaseBuilder setConfig( Setting setting, String value ) - { - if ( value == null ) - { - config.remove( setting.name() ); - } - else - { - // Test if we can get this setting with an updated config - Map testValue = stringMap( setting.name(), value ); - setting.apply( key -> testValue.containsKey( key ) ? testValue.get( key ) : config.get( key ) ); - - // No exception thrown, add it to existing config - config.put( setting.name(), value ); - } - return this; - } - - /** - * Set an unvalidated configuration option. - * - * @param name Name of the setting - * @param value New value of the setting - * @return the builder - * @deprecated Use setConfig with explicit {@link Setting} instead. - */ - @Deprecated - public GraphDatabaseBuilder setConfig( String name, String value ) - { - if ( value == null ) - { - config.remove( name ); - } - else - { - config.put( name, value ); - } - return this; - } - - /** - * Set a map of configuration settings into the builder. Overwrites any existing values. - * - * @param config Map of configuration settings - * @return the builder - * @deprecated Use setConfig with explicit {@link Setting} instead - */ - @Deprecated - @SuppressWarnings( "deprecation" ) - public GraphDatabaseBuilder setConfig( Map config ) - { - for ( Map.Entry stringStringEntry : config.entrySet() ) - { - setConfig( stringStringEntry.getKey(), stringStringEntry.getValue() ); - } - return this; - } - - /** - * Load a Properties file from a given file, and add the settings to - * the builder. - * - * @param fileName Filename of properties file to use - * @return the builder - * @throws IllegalArgumentException if the builder was unable to load from the given filename - */ - public GraphDatabaseBuilder loadPropertiesFromFile( String fileName ) - throws IllegalArgumentException - { - try - { - return loadPropertiesFromURL( new File( fileName ).toURI().toURL() ).setConfig("config.file.path", fileName); - } - catch ( MalformedURLException e ) - { - throw new IllegalArgumentException( "Illegal filename:" + fileName, e ); - } - } - - /** - * Load Properties file from a given URL, and add the settings to - * the builder. - * - * @param url URL of properties file to use - * @return the builder - */ - public GraphDatabaseBuilder loadPropertiesFromURL( URL url ) - throws IllegalArgumentException - { - Properties props = new Properties(); - try - { - try ( InputStream stream = url.openStream() ) - { - props.load( stream ); - } - } - catch ( Exception e ) - { - throw new IllegalArgumentException( "Unable to load " + url, e ); - } - Set> entries = props.entrySet(); - for ( Map.Entry entry : entries ) - { - String key = (String) entry.getKey(); - String value = (String) entry.getValue(); - setConfig( key, value ); - } - - return this; - } - - /** - * Create a new database with the configuration registered - * through the builder. - * - * @return an instance of GraphDatabaseService - */ - public GraphDatabaseService newGraphDatabase() - { - return creator.newDatabase( Config.defaults( config ) ); - } - - /** - * @deprecated This will be removed in the future. 
- */ - @Deprecated - public static class Delegator extends GraphDatabaseBuilder - { - private final GraphDatabaseBuilder actual; - - public Delegator( GraphDatabaseBuilder actual ) - { - super( null ); - this.actual = actual; - } - - @Override - public GraphDatabaseBuilder setConfig( Setting setting, String value ) - { - actual.setConfig( setting, value ); - return this; - } - - @Override - @SuppressWarnings( "deprecation" ) - public GraphDatabaseBuilder setConfig( String name, String value ) - { - actual.setConfig( name, value ); - return this; - } - - @Override - public GraphDatabaseBuilder setConfig( Map config ) - { - actual.setConfig( config ); - return this; - } - - @Override - public GraphDatabaseBuilder loadPropertiesFromFile( String fileName ) throws IllegalArgumentException - { - actual.loadPropertiesFromFile( fileName ); - return this; - } - - @Override - public GraphDatabaseBuilder loadPropertiesFromURL( URL url ) throws IllegalArgumentException - { - actual.loadPropertiesFromURL( url ); - return this; - } - - @Override - public GraphDatabaseService newGraphDatabase() - { - return actual.newGraphDatabase(); - } - } -} diff --git a/src/blob/java/org/neo4j/kernel/configuration/Config.java b/src/blob/java/org/neo4j/kernel/configuration/Config.java deleted file mode 100644 index da4427b0..00000000 --- a/src/blob/java/org/neo4j/kernel/configuration/Config.java +++ /dev/null @@ -1,1020 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.kernel.configuration; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; -import java.util.Set; -import java.util.TreeSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.function.BiConsumer; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -import cn.graiph.util.ContextMap; -import org.neo4j.configuration.ConfigOptions; -import org.neo4j.configuration.ConfigValue; -import org.neo4j.configuration.ExternalSettings; -import org.neo4j.configuration.LoadableConfig; -import org.neo4j.configuration.Secret; -import org.neo4j.graphdb.config.BaseSetting; -import org.neo4j.graphdb.config.Configuration; -import org.neo4j.graphdb.config.InvalidSettingException; -import org.neo4j.graphdb.config.Setting; -import org.neo4j.graphdb.config.SettingValidator; -import org.neo4j.graphdb.factory.GraphDatabaseSettings; -import org.neo4j.internal.diagnostics.DiagnosticsPhase; -import org.neo4j.internal.diagnostics.DiagnosticsProvider; -import org.neo4j.kernel.configuration.HttpConnector.Encryption; -import org.neo4j.kernel.impl.util.CopyOnWriteHashMap; -import org.neo4j.logging.BufferingLog; -import org.neo4j.logging.Log; -import org.neo4j.logging.Logger; - -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonMap; -import static org.neo4j.helpers.collection.MapUtil.stringMap; -import static org.neo4j.kernel.configuration.Connector.ConnectorType.BOLT; -import static org.neo4j.kernel.configuration.Connector.ConnectorType.HTTP; -import static org.neo4j.kernel.configuration.HttpConnector.Encryption.NONE; -import static org.neo4j.kernel.configuration.HttpConnector.Encryption.TLS; -import static org.neo4j.kernel.configuration.Settings.TRUE; - -/** - * This class holds the overall configuration of a Neo4j database instance. Use the accessors to convert the internal - * key-value settings to other types. - *

- * Users can assume that old settings have been migrated to their new counterparts, and that defaults have been - * applied. - */ -public class Config implements DiagnosticsProvider, Configuration -{ - public static final String DEFAULT_CONFIG_FILE_NAME = "neo4j.conf"; - - private final List configOptions; - - private final Map params = new CopyOnWriteHashMap<>(); // Read heavy workload - private final Map>> updateListeners = new ConcurrentHashMap<>(); - private final ConfigurationMigrator migrator; - private final List validators = new ArrayList<>(); - private final Map overriddenDefaults = new CopyOnWriteHashMap<>(); - private final Map> settingsMap; // Only contains fixed settings and not groups - - // Messages to this log get replayed into a real logger once logging has been instantiated. - private Log log = new BufferingLog(); - private ContextMap _instanceContext = new ContextMap(); - - public ContextMap getInstanceContext() - { - return _instanceContext; - } - - /** - * Builder class for a configuration. - *

- * The configuration has three layers of values: - *

    - *
  1. Default settings, which are provided by validators. - *
  2. File settings, parsed from the configuration file if one is provided. - *
  3. Overridden settings, as provided by the user with the {@link Builder#withSettings(Map)} methods. - *
- * They are added in the order specified, and earlier layers are thus overridden by later ones. - *

- * Although the builder allows you to override the {@link LoadableConfig}'s with withConfigClasses, - * this functionality is mainly for testing. If no classes are provided to the builder, they will be located through - * service loading, which is probably what you want in most cases. - *

- * Loaded {@link LoadableConfig}'s, whether provided though service loading or explicitly passed, will be scanned - * for validators that provides migration, validation and default values. Migrators can be specified with the - * {@link Migrator} annotation and should reside in a class implementing {@link LoadableConfig}. - */ - public static class Builder - { - private Map initialSettings = stringMap(); - private Map overriddenDefaults = stringMap(); - private List validators = new ArrayList<>(); - private File configFile; - private List settingsClasses; - private boolean connectorsDisabled; - private boolean throwOnFileLoadFailure = true; - - /** - * Augment the configuration with the passed setting. - * - * @param setting The setting to set. - * @param value The value of the setting, pre parsed. - */ - public Builder withSetting( final Setting setting, final String value ) - { - return withSetting( setting.name(), value ); - } - - /** - * Augment the configuration with the passed setting. - * - * @param setting The setting to set. - * @param value The value of the setting, pre parsed. - */ - public Builder withSetting( final String setting, final String value ) - { - initialSettings.put( setting, value ); - return this; - } - - /** - * Augment the configuration with the passed settings. - * - * @param initialSettings settings to augment the configuration with. - */ - public Builder withSettings( final Map initialSettings ) - { - this.initialSettings.putAll( initialSettings ); - return this; - } - - /** - * Set the classes that contains the {@link Setting} fields. If no classes are provided to the builder, they - * will be located through service loading. - * - * @param loadableConfigs A collection fo class instances providing settings. - */ - @Nonnull - public Builder withConfigClasses( final Collection loadableConfigs ) - { - if ( settingsClasses == null ) - { - settingsClasses = new ArrayList<>(); - } - settingsClasses.addAll( loadableConfigs ); - return this; - } - - /** - * Provide an additional validator. Validators are automatically localed within classes with - * {@link LoadableConfig}, but this allows you to add others. - * - * @param validator an additional validator. - */ - @Nonnull - public Builder withValidator( final ConfigurationValidator validator ) - { - this.validators.add( validator ); - return this; - } - - /** - * @see Builder#withValidator(ConfigurationValidator) - */ - @Nonnull - public Builder withValidators( final Collection validators ) - { - this.validators.addAll( validators ); - return this; - } - - /** - * Extends config with defaults for server, i.e. auth and connector settings. - */ - @Nonnull - public Builder withServerDefaults() - { - // Add server defaults - HttpConnector http = new HttpConnector( "http", NONE ); - HttpConnector https = new HttpConnector( "https", TLS ); - BoltConnector bolt = new BoltConnector( "bolt" ); - overriddenDefaults.put( GraphDatabaseSettings.auth_enabled.name(), TRUE ); - overriddenDefaults.put( http.enabled.name(), TRUE ); - overriddenDefaults.put( https.enabled.name(), TRUE ); - overriddenDefaults.put( bolt.enabled.name(), TRUE ); - - return this; - } - - /** - * Provide a file for initial configuration. The settings added with the {@link Builder#withSettings(Map)} - * methods will be applied on top of the settings in the file. - * - * @param configFile A configuration file to parse for initial settings. 
- */ - @Nonnull - public Builder withFile( final @Nullable File configFile ) - { - this.configFile = configFile; - this.withSetting("config.file.path", configFile.getAbsolutePath()); - return this; - } - - /** - * @see Builder#withFile(File) - */ - @Nonnull - public Builder withFile( final Path configFile ) - { - return withFile( configFile.toFile() ); - } - - /** - * @param configFile an optional configuration file. If not present, this call changes nothing. - */ - @Nonnull - public Builder withFile( Optional configFile ) - { - configFile.ifPresent( file -> this.withFile(file) ); - return this; - } - - /** - * Specifies the neo4j home directory to be set for this particular config. This will modify {@link - * GraphDatabaseSettings#neo4j_home} to the same value as provided. If this is not called, the home directory - * will be set to a system specific default home directory. - * - * @param homeDir The home directory this config belongs to. - */ - @Nonnull - public Builder withHome( final File homeDir ) - { - initialSettings.put( GraphDatabaseSettings.neo4j_home.name(), homeDir.getAbsolutePath() ); - return this; - } - - /** - * @see Builder#withHome(File) - */ - @Nonnull - public Builder withHome( final Path homeDir ) - { - return withHome( homeDir.toFile() ); - } - - /** - * This will force all connectors to be disabled during creation of the config. This can be useful if an - * offline mode is wanted, e.g. in dbms tools or test environments. - */ - @Nonnull - public Builder withConnectorsDisabled() - { - connectorsDisabled = true; - return this; - } - - /** - * Prevent the {@link #build()} method from throwing an {@link UncheckedIOException} if the given {@code withFile} configuration file could not be - * loaded for some reason. Instead, an error will be logged. The defualt behaviour is to throw the exception. - */ - public Builder withNoThrowOnFileLoadFailure() - { - throwOnFileLoadFailure = false; - return this; - } - - /** - * @return The config reflecting the state of the builder. - * @throws InvalidSettingException is thrown if an invalid setting is encountered and {@link - * GraphDatabaseSettings#strict_config_validation} is true. - */ - @Nonnull - public Config build() throws InvalidSettingException - { - List loadableConfigs = - Optional.ofNullable( settingsClasses ).orElseGet( LoadableConfig::allConfigClasses ); - - // If reading from a file, make sure we always have a neo4j_home - if ( configFile != null && !initialSettings.containsKey( GraphDatabaseSettings.neo4j_home.name() ) ) - { - initialSettings.put( GraphDatabaseSettings.neo4j_home.name(), System.getProperty( "user.dir" ) ); - } - - Config config = new Config( configFile, throwOnFileLoadFailure, initialSettings, overriddenDefaults, validators, loadableConfigs ); - - if ( connectorsDisabled ) - { - config.augment( config.allConnectorIdentifiers().stream().collect( - Collectors.toMap( id -> new Connector( id ).enabled.name(), id -> Settings.FALSE ) ) ); - } - - return config; - } - } - - @Nonnull - public static Builder builder() - { - return new Builder(); - } - - /** - * Convenient method for starting building from a file. - */ - @Nonnull - public static Builder fromFile( @Nullable final File configFile ) - { - return builder().withFile( configFile ); - } - - /** - * Convenient method for starting building from a file. 
- */ - @Nonnull - public static Builder fromFile( @Nonnull final Path configFile ) - { - return builder().withFile( configFile ); - } - - /** - * Convenient method for starting building from initial settings. - */ - @Nonnull - public static Builder fromSettings( final Map initialSettings ) - { - return builder().withSettings( initialSettings ); - } - - /** - * @return a configuration with default values. - */ - @Nonnull - public static Config defaults() - { - return builder().build(); - } - - /** - * @param initialSettings a map with settings to be present in the config. - * @return a configuration with default values augmented with the provided initialSettings. - */ - @Nonnull - public static Config defaults( @Nonnull final Map initialSettings ) - { - return builder().withSettings( initialSettings ).build(); - } - - /** - * Constructs a Config with default values and sets the supplied setting to the value. - * @param setting The initial setting to use. - * @param value The initial value to give the setting. - */ - @Nonnull - public static Config defaults( @Nonnull final Setting setting, final String value ) - { - return builder().withSetting( setting, value ).build(); - } - - private Config( File configFile, - boolean throwOnFileLoadFailure, - Map initialSettings, - Map overriddenDefaults, - Collection additionalValidators, - List settingsClasses ) - { - configOptions = settingsClasses.stream() - .map( LoadableConfig::getConfigOptions ) - .flatMap( List::stream ) - .collect( Collectors.toList() ); - - settingsMap = new HashMap<>(); - configOptions.stream() - .map( ConfigOptions::settingGroup ) - .filter( BaseSetting.class::isInstance ) - .map( BaseSetting.class::cast ) - .forEach( setting -> settingsMap.put( setting.name(), setting ) ); - - validators.addAll( additionalValidators ); - migrator = new AnnotationBasedConfigurationMigrator( settingsClasses ); - this.overriddenDefaults.putAll( overriddenDefaults ); - - boolean fromFile = configFile != null; - if ( fromFile ) - { - loadFromFile( configFile, log, throwOnFileLoadFailure, initialSettings ); - } - - overriddenDefaults.forEach( initialSettings::putIfAbsent ); - - migrateAndValidateAndUpdateSettings( initialSettings, fromFile ); - - // Only warn for deprecations if red from a file - if ( fromFile ) - { - warnAboutDeprecations( params ); - } - } - - /** - * Retrieves a configuration value. If no value is configured, a default value will be returned instead. Note that - * {@code null} is a valid value. - * - * @param setting The configuration property. - * @param the underlying type of the setting. - * @return the value of the given setting, {@code null} can be returned. - */ - @Override - public T get( Setting setting ) - { - return setting.apply( params::get ); - } - - /** - * Test whether a setting is configured or not. Can be used to check if default value will be returned or not. - * - * @param setting The setting to check. - * @return {@code true} if the setting is configures, {@code false} otherwise implying that the default value will - * be returned if applicable. - */ - public boolean isConfigured( Setting setting ) - { - return params.containsKey( setting.name() ); - } - - /** - * Returns the currently configured identifiers for grouped settings. - * - * Identifiers for groups exists to allow multiple configured settings of the same setting type. - * E.g. giving that prefix of a group is {@code dbms.ssl.policy} and the following settings are configured: - *

    - *
  • {@code dbms.ssl.policy.default.base_directory} - *
  • {@code dbms.ssl.policy.other.base_directory} - *
- * a call to this method will return {@code ["default", "other"]}. - *

- * The key difference to these identifiers are that they are only known at runtime after a valid configuration is - * parsed and validated. - * - * @param groupClass A class that represents a setting group. Must be annotated with {@link Group} - * @return A set of configured identifiers for the given group. - * @throws IllegalArgumentException if the provided class is not annotated with {@link Group}. - */ - public Set identifiersFromGroup( Class groupClass ) - { - if ( !groupClass.isAnnotationPresent( Group.class ) ) - { - throw new IllegalArgumentException( "Class must be annotated with @Group" ); - } - - String prefix = groupClass.getAnnotation( Group.class ).value(); - Pattern pattern = Pattern.compile( Pattern.quote( prefix ) + "\\.([^.]+)\\.(.+)" ); - - Set identifiers = new TreeSet<>(); - for ( String setting : params.keySet() ) - { - Matcher matcher = pattern.matcher( setting ); - if ( matcher.matches() ) - { - identifiers.add( matcher.group( 1 ) ); - } - } - return identifiers; - } - - /** - * Augment the existing config with new settings, overriding any conflicting settings, but keeping all old - * non-overlapping ones. - * - * @param settings to add and override. - * @throws InvalidSettingException when and invalid setting is found and {@link - * GraphDatabaseSettings#strict_config_validation} is true. - */ - public void augment( Map settings ) throws InvalidSettingException - { - migrateAndValidateAndUpdateSettings( settings, false ); - } - - /** - * @see Config#augment(Map) - */ - public void augment( String setting, String value ) throws InvalidSettingException - { - augment( singletonMap( setting, value ) ); - } - - /** - * @see Config#augment(Map) - */ - public void augment( Setting setting, String value ) - { - augment( setting.name(), value ); - } - - /** - * Augment the existing config with new settings, overriding any conflicting settings, but keeping all old - * non-overlapping ones. - * - * @param config config to add and override with. - * @throws InvalidSettingException when and invalid setting is found and {@link - * GraphDatabaseSettings#strict_config_validation} is true. - */ - public void augment( Config config ) throws InvalidSettingException - { - augment( config.params ); - } - - /** - * Augment the existing config with new settings, ignoring any conflicting settings. - * - * @param setting settings to add and override - * @throws InvalidSettingException when and invalid setting is found and {@link - * GraphDatabaseSettings#strict_config_validation} is true. - */ - public void augmentDefaults( Setting setting, String value ) throws InvalidSettingException - { - overriddenDefaults.put( setting.name(), value ); - params.putIfAbsent( setting.name(), value ); - } - - /** - * Specify a log where errors and warnings will be reported. Log messages that happens prior to setting a logger - * will be buffered and replayed onto the first logger that is set. - * - * @param log to use. 
- */ - public void setLogger( Log log ) - { - if ( this.log instanceof BufferingLog ) - { - ((BufferingLog) this.log).replayInto( log ); - } - this.log = log; - } - - /** - * @param key to lookup in the config - * @return the value or none if it doesn't exist in the config - */ - public Optional getRaw( @Nonnull String key ) - { - return Optional.ofNullable( params.get( key ) ); - } - - /** - * @return a copy of the raw configuration map - */ - public Map getRaw() - { - return new HashMap<>( params ); - } - - /** - * @return a configured setting - */ - public Optional getValue( @Nonnull String key ) - { - return configOptions.stream() - .map( it -> it.asConfigValues( params ) ) - .flatMap( List::stream ) - .filter( it -> it.name().equals( key ) ) - .map( ConfigValue::value ) - .findFirst() - .orElseGet( Optional::empty ); - } - - /** - * Updates a provided setting to a given value. This method is intended to be used for changing settings during - * runtime. If you want to change settings at startup, use {@link Config#augment}. - * - * @implNote No migration or config validation is done. If you need this you have to refactor this method. - * - * @param setting The setting to set to the specified value. - * @param update The new value to set, passing {@code null} or the empty string should reset the value back to default value. - * @param origin The source of the change, e.g. {@code dbms.setConfigValue()}. - * @throws IllegalArgumentException if the provided setting is unknown or not dynamic. - * @throws InvalidSettingException if the value is not formatted correctly. - */ - public void updateDynamicSetting( String setting, String update, String origin ) - throws IllegalArgumentException, InvalidSettingException - { - verifyValidDynamicSetting( setting ); - - synchronized ( params ) - { - boolean oldValueIsDefault = false; - boolean newValueIsDefault = false; - String oldValue; - String newValue; - if ( update == null || update.isEmpty() ) - { - // Empty means we want to delete the configured value and fallback to the default value - String overriddenDefault = overriddenDefaults.get( setting ); - boolean hasDefault = overriddenDefault != null; - oldValue = hasDefault ? params.put( setting, overriddenDefault ) : params.remove( setting ); - newValue = getDefaultValueOf( setting ); - newValueIsDefault = true; - } - else - { - // Change setting, make sure it's valid - Map newEntry = stringMap( setting, update ); - List settingValidators = configOptions.stream() - .map( ConfigOptions::settingGroup ) - .collect( Collectors.toList() ); - for ( SettingValidator validator : settingValidators ) - { - validator.validate( newEntry, ignore -> {} ); // Throws if invalid - } - - String previousValue = params.put( setting, update ); - if ( previousValue != null ) - { - oldValue = previousValue; - } - else - { - oldValue = getDefaultValueOf( setting ); - oldValueIsDefault = true; - } - newValue = update; - } - - String oldValueForLog = obsfucateIfSecret( setting, oldValue ); - String newValueForLog = obsfucateIfSecret( setting, newValue ); - log.info( "Setting changed: '%s' changed from '%s' to '%s' via '%s'", - setting, oldValueIsDefault ? "default (" + oldValueForLog + ")" : oldValueForLog, - newValueIsDefault ? 
"default (" + newValueForLog + ")" : newValueForLog, origin ); - updateListeners.getOrDefault( setting, emptyList() ).forEach( l -> l.accept( oldValue, newValue ) ); - } - } - - private void verifyValidDynamicSetting( String setting ) - { - Optional option = findConfigValue( setting ); - - if ( !option.isPresent() ) - { - throw new IllegalArgumentException( "Unknown setting: " + setting ); - } - - ConfigValue configValue = option.get(); - if ( !configValue.dynamic() ) - { - throw new IllegalArgumentException( "Setting is not dynamic and can not be changed at runtime" ); - } - } - - private String getDefaultValueOf( String setting ) - { - if ( overriddenDefaults.containsKey( setting ) ) - { - return overriddenDefaults.get( setting ); - } - if ( settingsMap.containsKey( setting ) ) - { - return settingsMap.get( setting ).getDefaultValue(); - } - return ""; - } - - private Optional findConfigValue( String setting ) - { - return configOptions.stream().map( it -> it.asConfigValues( params ) ).flatMap( List::stream ) - .filter( it -> it.name().equals( setting ) ).findFirst(); - } - - /** - * Register a listener for dynamic updates to the given setting. - *

- * The listener will get called whenever the {@link #updateDynamicSetting(String, String, String)} method is used - * to change the given setting, and the listener will be supplied the parsed values of the old and the new - * configuration value. - * - * @param setting The {@link Setting} to listen for changes to. - * @param listener The listener callback that will be notified of any configuration changes to the given setting. - * @param The value type of the setting. - */ - public void registerDynamicUpdateListener( Setting setting, BiConsumer listener ) - { - String settingName = setting.name(); - verifyValidDynamicSetting( settingName ); - BiConsumer projectedListener = ( oldValStr, newValStr ) -> - { - try - { - V oldVal = setting.apply( s -> oldValStr ); - V newVal = setting.apply( s -> newValStr ); - listener.accept( oldVal, newVal ); - } - catch ( Exception e ) - { - log.error( "Failure when notifying listeners after dynamic setting change; " + - "new setting might not have taken effect: " + e.getMessage(), e ); - } - }; - updateListeners.computeIfAbsent( settingName, k -> new ConcurrentLinkedQueue<>() ).add( projectedListener ); - } - - /** - * @return all effective config values - */ - public Map getConfigValues() - { - return configOptions.stream() - .map( it -> it.asConfigValues( params ) ) - .flatMap( List::stream ) - .collect( Collectors.toMap( ConfigValue::name, it -> it, ( val1, val2 ) -> - { - throw new RuntimeException( "Duplicate setting: " + val1.name() + ": " + val1 + " and " + val2 ); - } ) ); - } - - @Override - public String getDiagnosticsIdentifier() - { - return getClass().getName(); - } - - @Override - public void acceptDiagnosticsVisitor( Object visitor ) - { - // nothing visits configuration - } - - @Override - public void dump( DiagnosticsPhase phase, Logger logger ) - { - if ( phase.isInitialization() || phase.isExplicitlyRequested() ) - { - logger.log( "Neo4j Kernel properties:" ); - for ( Map.Entry param : params.entrySet() ) - { - logger.log( "%s=%s", param.getKey(), obsfucateIfSecret( param ) ); - } - } - } - - private String obsfucateIfSecret( Map.Entry param ) - { - return obsfucateIfSecret( param.getKey(), param.getValue() ); - } - - private String obsfucateIfSecret( String key, String value ) - { - if ( settingsMap.containsKey( key ) && settingsMap.get( key ).secret() ) - { - return Secret.OBSFUCATED; - } - else - { - return value; - } - } - - /** - * Migrates and validates all string values in the provided settings map. - * - * This will update the configuration with the provided values regardless whether errors are encountered or not. - * - * @param settings the settings to migrate and validate. - * @param warnOnUnknownSettings if true method log messages to {@link Config#log}. - * @throws InvalidSettingException when and invalid setting is found and {@link - * GraphDatabaseSettings#strict_config_validation} is true. 
- */ - private void migrateAndValidateAndUpdateSettings( Map settings, boolean warnOnUnknownSettings ) - throws InvalidSettingException - { - Map migratedSettings = migrateSettings( settings ); - params.putAll( migratedSettings ); - - List settingValidators = configOptions.stream() - .map( ConfigOptions::settingGroup ) - .collect( Collectors.toList() ); - - // Validate settings - Map additionalSettings = - new IndividualSettingsValidator( settingValidators, warnOnUnknownSettings ).validate( this, log ); - params.putAll( additionalSettings ); - - // Validate configuration - for ( ConfigurationValidator validator : validators ) - { - validator.validate( this, log ); - } - } - - private Map migrateSettings( Map settings ) - { - return migrator.apply( settings, log ); - } - - private void warnAboutDeprecations( Map userSettings ) - { - configOptions.stream() - .flatMap( it -> it.asConfigValues( userSettings ).stream() ) - .filter( config -> userSettings.containsKey( config.name() ) && config.deprecated() ) - .forEach( c -> - { - if ( c.replacement().isPresent() ) - { - log.warn( "%s is deprecated. Replaced by %s", c.name(), c.replacement().get() ); - } - else - { - log.warn( "%s is deprecated.", c.name() ); - } - } ); - } - - @Nonnull - private static void loadFromFile( @Nonnull File file, @Nonnull Log log, boolean throwOnFileLoadFailure, Map into ) - { - if ( !file.exists() ) - { - if ( throwOnFileLoadFailure ) - { - throw new ConfigLoadIOException( new IOException( "Config file [" + file + "] does not exist." ) ); - } - log.warn( "Config file [%s] does not exist.", file ); - return; - } - try - { - @SuppressWarnings( "MismatchedQueryAndUpdateOfCollection" ) - Properties loader = new Properties() - { - @Override - public Object put( Object key, Object val ) - { - String setting = key.toString(); - String value = val.toString(); - // We use the 'super' Hashtable as a set of all the settings we have logged warnings about. - // We only want to warn about each duplicate setting once. - if ( into.putIfAbsent( setting, value ) != null && - super.put( key, val ) == null && - !key.equals( ExternalSettings.additionalJvm.name() ) ) - { - log.warn( "The '%s' setting is specified more than once. Settings only be specified once, to avoid ambiguity. 
" + - "The setting value that will be used is '%s'.", setting, into.get( setting ) ); - } - return null; - } - }; - try ( FileInputStream stream = new FileInputStream( file ) ) - { - loader.load( stream ); - } - } - catch ( IOException e ) - { - if ( throwOnFileLoadFailure ) - { - throw new ConfigLoadIOException( "Unable to load config file [" + file + "].", e ); - } - log.error( "Unable to load config file [%s]: %s", file, e.getMessage() ); - } - } - - /** - * @return a list of all connector names like 'http' in 'dbms.connector.http.enabled = true' - */ - @Nonnull - public Set allConnectorIdentifiers() - { - return allConnectorIdentifiers( params ); - } - - /** - * @return a list of all connector names like 'http' in 'dbms.connector.http.enabled = true' - */ - @Nonnull - public Set allConnectorIdentifiers( @Nonnull Map params ) - { - return identifiersFromGroup( Connector.class ); - } - - /** - * @return list of all configured bolt connectors - */ - @Nonnull - public List boltConnectors() - { - return boltConnectors( params ).collect( Collectors.toList() ); - } - - /** - * @return stream of all configured bolt connectors - */ - @Nonnull - private Stream boltConnectors( @Nonnull Map params ) - { - return allConnectorIdentifiers( params ).stream().map( BoltConnector::new ).filter( - c -> c.group.groupKey.equalsIgnoreCase( "bolt" ) || BOLT == c.type.apply( params::get ) ); - } - - /** - * @return list of all configured bolt connectors which are enabled - */ - @Nonnull - public List enabledBoltConnectors() - { - return enabledBoltConnectors( params ); - } - - /** - * @return list of all configured bolt connectors which are enabled - */ - @Nonnull - public List enabledBoltConnectors( @Nonnull Map params ) - { - return boltConnectors( params ) - .filter( c -> c.enabled.apply( params::get ) ) - .collect( Collectors.toList() ); - } - - /** - * @return list of all configured http connectors - */ - @Nonnull - public List httpConnectors() - { - return httpConnectors( params ).collect( Collectors.toList() ); - } - - /** - * @return stream of all configured http connectors - */ - @Nonnull - private Stream httpConnectors( @Nonnull Map params ) - { - return allConnectorIdentifiers( params ).stream() - .map( Connector::new ) - .filter( c -> c.group.groupKey.equalsIgnoreCase( "http" ) || - c.group.groupKey.equalsIgnoreCase( "https" ) || - HTTP == c.type.apply( params::get ) ) - .map( c -> - { - final String name = c.group.groupKey; - final Encryption defaultEncryption; - switch ( name ) - { - case "https": - defaultEncryption = TLS; - break; - case "http": - default: - defaultEncryption = NONE; - break; - } - - return new HttpConnector( name, - HttpConnectorValidator.encryptionSetting( name, defaultEncryption ).apply( params::get ) ); - } ); - } - - /** - * @return list of all configured http connectors which are enabled - */ - @Nonnull - public List enabledHttpConnectors() - { - return enabledHttpConnectors( params ); - } - - /** - * @return list of all configured http connectors which are enabled - */ - @Nonnull - private List enabledHttpConnectors( @Nonnull Map params ) - { - return httpConnectors( params ) - .filter( c -> c.enabled.apply( params::get ) ) - .collect( Collectors.toList() ); - } - - @Override - public String toString() - { - return params.entrySet().stream() - .sorted( Comparator.comparing( Map.Entry::getKey ) ) - .map( entry -> entry.getKey() + "=" + obsfucateIfSecret( entry ) ) - .collect( Collectors.joining( ", ") ); - } -} diff --git 
a/src/blob/java/org/neo4j/kernel/impl/store/DynamicNodeLabels.java b/src/blob/java/org/neo4j/kernel/impl/store/DynamicNodeLabels.java deleted file mode 100644 index 09828c41..00000000 --- a/src/blob/java/org/neo4j/kernel/impl/store/DynamicNodeLabels.java +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.kernel.impl.store; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; - -import org.neo4j.helpers.collection.Iterables; -import org.neo4j.helpers.collection.Pair; -import org.neo4j.kernel.impl.InstanceContext; -import org.neo4j.kernel.impl.store.allocator.ReusableRecordsCompositeAllocator; -import org.neo4j.kernel.impl.store.record.DynamicRecord; -import org.neo4j.kernel.impl.store.record.NodeRecord; - -import static java.lang.String.format; -import static org.neo4j.kernel.impl.store.AbstractDynamicStore.readFullByteArrayFromHeavyRecords; -import static org.neo4j.kernel.impl.store.DynamicArrayStore.getRightArray; -import static org.neo4j.kernel.impl.store.LabelIdArray.filter; -import static org.neo4j.kernel.impl.store.LabelIdArray.stripNodeId; -import static org.neo4j.kernel.impl.store.NodeLabelsField.fieldPointsToDynamicRecordOfLabels; -import static org.neo4j.kernel.impl.store.NodeLabelsField.firstDynamicLabelRecordId; -import static org.neo4j.kernel.impl.store.NodeLabelsField.parseLabelsBody; -import static org.neo4j.kernel.impl.store.PropertyType.ARRAY; - -public class DynamicNodeLabels implements NodeLabels -{ - private final NodeRecord node; - - public DynamicNodeLabels( NodeRecord node ) - { - this.node = node; - } - - @Override - public long[] get( NodeStore nodeStore ) - { - return get( node, nodeStore ); - } - - public static long[] get( NodeRecord node, NodeStore nodeStore ) - { - if ( node.isLight() ) - { - nodeStore.ensureHeavy( node, firstDynamicLabelRecordId( node.getLabelField() ) ); - } - return getDynamicLabelsArray( node.getUsedDynamicLabelRecords(), nodeStore.getDynamicLabelStore() ); - } - - @Override - public long[] getIfLoaded() - { - if ( node.isLight() ) - { - return null; - } - return stripNodeId( (long[]) getRightArray( InstanceContext.none(), readFullByteArrayFromHeavyRecords( - node.getUsedDynamicLabelRecords(), ARRAY ) ).asObject() ); - } - - @Override - public Collection put( long[] labelIds, NodeStore nodeStore, DynamicRecordAllocator allocator ) - { - Arrays.sort( labelIds ); - return putSorted( node, labelIds, nodeStore, allocator ); - } - - static Collection putSorted( NodeRecord node, long[] labelIds, NodeStore nodeStore, DynamicRecordAllocator allocator ) - { - long existingLabelsField = node.getLabelField(); - long existingLabelsBits = parseLabelsBody( existingLabelsField ); - - Collection changedDynamicRecords = node.getDynamicLabelRecords(); - - long 
labelField = node.getLabelField(); - if ( fieldPointsToDynamicRecordOfLabels( labelField ) ) - { - // There are existing dynamic label records, get them - nodeStore.ensureHeavy( node, existingLabelsBits ); - changedDynamicRecords = node.getDynamicLabelRecords(); - setNotInUse( changedDynamicRecords ); - } - - if ( !InlineNodeLabels.tryInlineInNodeRecord( node, labelIds, changedDynamicRecords ) ) - { - Iterator recycledRecords = changedDynamicRecords.iterator(); - Collection allocatedRecords = allocateRecordsForDynamicLabels( node.getId(), labelIds, - new ReusableRecordsCompositeAllocator( recycledRecords, allocator ) ); - // Set the rest of the previously set dynamic records as !inUse - while ( recycledRecords.hasNext() ) - { - DynamicRecord removedRecord = recycledRecords.next(); - removedRecord.setInUse( false ); - allocatedRecords.add( removedRecord ); - } - node.setLabelField( dynamicPointer( allocatedRecords ), allocatedRecords ); - changedDynamicRecords = allocatedRecords; - } - - return changedDynamicRecords; - } - - @Override - public Collection add( long labelId, NodeStore nodeStore, DynamicRecordAllocator allocator ) - { - nodeStore.ensureHeavy( node, firstDynamicLabelRecordId( node.getLabelField() ) ); - long[] existingLabelIds = getDynamicLabelsArray( node.getUsedDynamicLabelRecords(), - nodeStore.getDynamicLabelStore() ); - long[] newLabelIds = LabelIdArray.concatAndSort( existingLabelIds, labelId ); - Collection existingRecords = node.getDynamicLabelRecords(); - Collection changedDynamicRecords = allocateRecordsForDynamicLabels( node.getId(), newLabelIds, - new ReusableRecordsCompositeAllocator( existingRecords, allocator ) ); - node.setLabelField( dynamicPointer( changedDynamicRecords ), changedDynamicRecords ); - return changedDynamicRecords; - } - - @Override - public Collection remove( long labelId, NodeStore nodeStore ) - { - nodeStore.ensureHeavy( node, firstDynamicLabelRecordId( node.getLabelField() ) ); - long[] existingLabelIds = getDynamicLabelsArray( node.getUsedDynamicLabelRecords(), - nodeStore.getDynamicLabelStore() ); - long[] newLabelIds = filter( existingLabelIds, labelId ); - Collection existingRecords = node.getDynamicLabelRecords(); - if ( InlineNodeLabels.tryInlineInNodeRecord( node, newLabelIds, existingRecords ) ) - { - setNotInUse( existingRecords ); - } - else - { - Collection newRecords = allocateRecordsForDynamicLabels( node.getId(), newLabelIds, - new ReusableRecordsCompositeAllocator( existingRecords, nodeStore.getDynamicLabelStore() ) ); - node.setLabelField( dynamicPointer( newRecords ), existingRecords ); - if ( !newRecords.equals( existingRecords ) ) - { // One less dynamic record, mark that one as not in use - for ( DynamicRecord record : existingRecords ) - { - if ( !newRecords.contains( record ) ) - { - record.setInUse( false ); - } - } - } - } - return existingRecords; - } - - public long getFirstDynamicRecordId() - { - return firstDynamicLabelRecordId( node.getLabelField() ); - } - - public static long dynamicPointer( Collection newRecords ) - { - return 0x8000000000L | Iterables.first( newRecords ).getId(); - } - - private static void setNotInUse( Collection changedDynamicRecords ) - { - for ( DynamicRecord record : changedDynamicRecords ) - { - record.setInUse( false ); - } - } - - @Override - public boolean isInlined() - { - return false; - } - - @Override - public String toString() - { - if ( node.isLight() ) - { - return format( "Dynamic(id:%d)", firstDynamicLabelRecordId( node.getLabelField() ) ); - } - return format( 
"Dynamic(id:%d,[%s])", firstDynamicLabelRecordId( node.getLabelField() ), - Arrays.toString( getDynamicLabelsArrayFromHeavyRecords( node.getUsedDynamicLabelRecords() ) ) ); - } - - public static Collection allocateRecordsForDynamicLabels( long nodeId, long[] labels, - AbstractDynamicStore dynamicLabelStore ) - { - return allocateRecordsForDynamicLabels( nodeId, labels, (DynamicRecordAllocator)dynamicLabelStore ); - } - - public static Collection allocateRecordsForDynamicLabels( long nodeId, long[] labels, - DynamicRecordAllocator allocator ) - { - long[] storedLongs = LabelIdArray.prependNodeId( nodeId, labels ); - Collection records = new ArrayList<>(); - // since we can't store points in long array we passing false as possibility to store points - DynamicArrayStore.allocateRecords( records, storedLongs, allocator, false ); - return records; - } - - public static long[] getDynamicLabelsArray( Iterable records, - AbstractDynamicStore dynamicLabelStore ) - { - long[] storedLongs = (long[]) - DynamicArrayStore.getRightArray( InstanceContext.of( dynamicLabelStore ), - dynamicLabelStore.readFullByteArray( records, PropertyType.ARRAY ) ).asObject(); - return LabelIdArray.stripNodeId( storedLongs ); - } - - public static long[] getDynamicLabelsArrayFromHeavyRecords( Iterable records ) - { - long[] storedLongs = (long[]) - DynamicArrayStore.getRightArray( InstanceContext.none(), readFullByteArrayFromHeavyRecords( records, PropertyType.ARRAY ) ).asObject(); - return LabelIdArray.stripNodeId( storedLongs ); - } - - public static Pair getDynamicLabelsArrayAndOwner( Iterable records, - AbstractDynamicStore dynamicLabelStore ) - { - long[] storedLongs = (long[]) - DynamicArrayStore.getRightArray( InstanceContext.of( dynamicLabelStore ), - dynamicLabelStore.readFullByteArray( records, PropertyType.ARRAY ) ).asObject(); - return Pair.of(storedLongs[0], LabelIdArray.stripNodeId( storedLongs )); - } -} diff --git a/src/blob/java/org/neo4j/kernel/impl/store/GeometryType.java b/src/blob/java/org/neo4j/kernel/impl/store/GeometryType.java deleted file mode 100644 index ae085086..00000000 --- a/src/blob/java/org/neo4j/kernel/impl/store/GeometryType.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.kernel.impl.store; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; - -import cn.graiph.util.ContextMap; -import org.neo4j.helpers.collection.Pair; -import org.neo4j.kernel.impl.store.format.standard.StandardFormatSettings; -import org.neo4j.kernel.impl.store.record.PropertyBlock; -import org.neo4j.values.storable.ArrayValue; -import org.neo4j.values.storable.CoordinateReferenceSystem; -import org.neo4j.values.storable.FloatingPointArray; -import org.neo4j.values.storable.PointValue; -import org.neo4j.values.storable.Value; -import org.neo4j.values.storable.Values; - -/** - * For the PropertyStore format, check {@link PropertyStore}. - * For the array format, check {@link DynamicArrayStore}. - */ -public enum GeometryType -{ - GEOMETRY_INVALID( 0, "Invalid" ) - { - @Override - public Value decode( CoordinateReferenceSystem crs, int dimension, long[] valueBlocks, int offset ) - { - throw new UnsupportedOperationException( "Cannot decode invalid geometry" ); - } - - @Override - public int calculateNumberOfBlocksUsedForGeometry( long firstBlock ) - { - return PropertyType.BLOCKS_USED_FOR_BAD_TYPE_OR_ENCODING; - } - - @Override - public ArrayValue decodeArray( ContextMap ic, GeometryHeader header, byte[] data ) - { - throw new UnsupportedOperationException( "Cannot decode invalid geometry array" ); - } - }, - GEOMETRY_POINT( 1, "Point" ) - { - @Override - public Value decode( CoordinateReferenceSystem crs, int dimension, long[] valueBlocks, int offset ) - { - double[] coordinate = new double[dimension]; - for ( int i = 0; i < dimension; i++ ) - { - coordinate[i] = Double.longBitsToDouble( valueBlocks[i + 1 + offset] ); - } - return Values.pointValue( crs, coordinate ); - } - - @Override - public int calculateNumberOfBlocksUsedForGeometry( long firstBlock ) - { - int dimension = getDimension( firstBlock ); - if ( dimension > GeometryType.getMaxSupportedDimensions() ) - { - return PropertyType.BLOCKS_USED_FOR_BAD_TYPE_OR_ENCODING; - } - return 1 + dimension; - } - - @Override - public ArrayValue decodeArray( ContextMap ic, GeometryHeader header, byte[] data ) - { - byte[] dataHeader = PropertyType.ARRAY.readDynamicRecordHeader( data ); - byte[] dataBody = new byte[data.length - dataHeader.length]; - System.arraycopy( data, dataHeader.length, dataBody, 0, dataBody.length ); - Value dataValue = DynamicArrayStore.getRightArray( ic, Pair.of( dataHeader, dataBody ) ); - if ( dataValue instanceof FloatingPointArray ) - { - FloatingPointArray numbers = (FloatingPointArray) dataValue; - PointValue[] points = new PointValue[numbers.length() / header.dimension]; - for ( int i = 0; i < points.length; i++ ) - { - double[] coords = new double[header.dimension]; - for ( int d = 0; d < header.dimension; d++ ) - { - coords[d] = numbers.doubleValue( i * header.dimension + d ); - } - points[i] = Values.pointValue( header.crs, coords ); - } - return Values.pointArray( points ); - } - else - { - throw new InvalidRecordException( - "Point array with unexpected type. Actual:" + dataValue.getClass().getSimpleName() + ". Expected: FloatingPointArray." 
); - } - } - }; - - /** - * Handler for header information for Geometry objects and arrays of Geometry objects - */ - public static class GeometryHeader - { - private final int geometryType; - private final int dimension; - private final CoordinateReferenceSystem crs; - - private GeometryHeader( int geometryType, int dimension, CoordinateReferenceSystem crs ) - { - this.geometryType = geometryType; - this.dimension = dimension; - this.crs = crs; - } - - private GeometryHeader( int geometryType, int dimension, int crsTableId, int crsCode ) - { - this( geometryType, dimension, CoordinateReferenceSystem.get( crsTableId, crsCode ) ); - } - - private void writeArrayHeaderTo( byte[] bytes ) - { - bytes[0] = (byte) PropertyType.GEOMETRY.intValue(); - bytes[1] = (byte) geometryType; - bytes[2] = (byte) dimension; - bytes[3] = (byte) crs.getTable().getTableId(); - bytes[4] = (byte) (crs.getCode() >> 8 & 0xFFL); - bytes[5] = (byte) (crs.getCode() & 0xFFL); - } - - static GeometryHeader fromArrayHeaderBytes( byte[] header ) - { - int geometryType = Byte.toUnsignedInt( header[1] ); - int dimension = Byte.toUnsignedInt( header[2] ); - int crsTableId = Byte.toUnsignedInt( header[3] ); - int crsCode = (Byte.toUnsignedInt( header[4] ) << 8) + Byte.toUnsignedInt( header[5] ); - return new GeometryHeader( geometryType, dimension, crsTableId, crsCode ); - } - - public static GeometryHeader fromArrayHeaderByteBuffer( ByteBuffer buffer ) - { - int geometryType = Byte.toUnsignedInt( buffer.get() ); - int dimension = Byte.toUnsignedInt( buffer.get() ); - int crsTableId = Byte.toUnsignedInt( buffer.get() ); - int crsCode = (Byte.toUnsignedInt( buffer.get() ) << 8) + Byte.toUnsignedInt( buffer.get() ); - return new GeometryHeader( geometryType, dimension, crsTableId, crsCode ); - } - } - - private static final GeometryType[] TYPES = GeometryType.values(); - private static final Map all = new HashMap<>( TYPES.length ); - - static - { - for ( GeometryType geometryType : TYPES ) - { - all.put( geometryType.name, geometryType ); - } - } - - private static final long GEOMETRY_TYPE_MASK = 0x00000000F0000000L; - private static final long DIMENSION_MASK = 0x0000000F00000000L; - private static final long CRS_TABLE_MASK = 0x000000F000000000L; - private static final long CRS_CODE_MASK = 0x00FFFF0000000000L; - private static final long PRECISION_MASK = 0x0100000000000000L; - - private static int getGeometryType( long firstBlock ) - { - return (int) ((firstBlock & GEOMETRY_TYPE_MASK) >> 28); - } - - private static int getDimension( long firstBlock ) - { - return (int) ((firstBlock & DIMENSION_MASK) >> 32); - } - - private static int getCRSTable( long firstBlock ) - { - return (int) ((firstBlock & CRS_TABLE_MASK) >> 36); - } - - private static int getCRSCode( long firstBlock ) - { - return (int) ((firstBlock & CRS_CODE_MASK) >> 40); - } - - private static boolean isFloatPrecision( long firstBlock ) - { - return ((firstBlock & PRECISION_MASK) >> 56) == 1; - } - - private static int getMaxSupportedDimensions() - { - return PropertyType.getPayloadSizeLongs() - 1; - } - - public static int calculateNumberOfBlocksUsed( long firstBlock ) - { - GeometryType geometryType = find( getGeometryType( firstBlock ) ); - return geometryType.calculateNumberOfBlocksUsedForGeometry( firstBlock ); - } - - private static GeometryType find( int gtype ) - { - if ( gtype < TYPES.length && gtype >= 0 ) - { - return TYPES[gtype]; - } - else - { - // Kernel code requires no exceptions in deeper PropertyChain processing of corrupt/invalid data - return 
GEOMETRY_INVALID; - } - } - - public static Value decode( PropertyBlock block ) - { - return decode( block.getValueBlocks(), 0 ); - } - - public static Value decode( long[] valueBlocks, int offset ) - { - long firstBlock = valueBlocks[offset]; - int gtype = getGeometryType( firstBlock ); - int dimension = getDimension( firstBlock ); - - if ( isFloatPrecision( firstBlock ) ) - { - throw new UnsupportedOperationException( "Float precision is unsupported in Geometry properties" ); - } - if ( dimension > GeometryType.getMaxSupportedDimensions() ) - { - throw new UnsupportedOperationException( - "Points with more than " + GeometryType.getMaxSupportedDimensions() + - " dimensions are not supported" ); - } - CoordinateReferenceSystem crs = CoordinateReferenceSystem.get( getCRSTable( firstBlock ), getCRSCode( firstBlock ) ); - return find( gtype ).decode( crs, dimension, valueBlocks, offset ); - } - - public static long[] encodePoint( int keyId, CoordinateReferenceSystem crs, double[] coordinate ) - { - if ( coordinate.length > GeometryType.getMaxSupportedDimensions() ) - { - // One property block can only contains at most 4x8 byte parts, one for header and 3 for coordinates - throw new UnsupportedOperationException( - "Points with more than " + GeometryType.getMaxSupportedDimensions() + - " dimensions are not supported" ); - } - - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - - long keyAndType = keyId | (((long) (PropertyType.GEOMETRY.intValue()) << idBits)); - long gtypeBits = GeometryType.GEOMETRY_POINT.gtype << (idBits + 4); - long dimensionBits = ((long) coordinate.length) << (idBits + 8); - long crsTableIdBits = ((long) crs.getTable().getTableId()) << (idBits + 12); - long crsCodeBits = ((long) crs.getCode()) << (idBits + 16); - - long[] data = new long[1 + coordinate.length]; - data[0] = keyAndType | gtypeBits | dimensionBits | crsTableIdBits | crsCodeBits; - for ( int i = 0; i < coordinate.length; i++ ) - { - data[1 + i] = Double.doubleToLongBits( coordinate[i] ); - } - return data; - } - - public static byte[] encodePointArray( PointValue[] points ) - { - int dimension = points[0].coordinate().length; - CoordinateReferenceSystem crs = points[0].getCoordinateReferenceSystem(); - for ( int i = 1; i < points.length; i++ ) - { - if ( dimension != points[i].coordinate().length ) - { - throw new IllegalArgumentException( - "Attempting to store array of points with inconsistent dimension. Point " + i + " has a different dimension." ); - } - if ( !crs.equals( points[i].getCoordinateReferenceSystem() ) ) - { - throw new IllegalArgumentException( "Attempting to store array of points with inconsistent CRS. Point " + i + " has a different CRS." 
); - } - } - - double[] data = new double[points.length * dimension]; - for ( int i = 0; i < data.length; i++ ) - { - data[i] = points[i / dimension].coordinate()[i % dimension]; - } - GeometryHeader geometryHeader = new GeometryHeader( GeometryType.GEOMETRY_POINT.gtype, dimension, crs ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.GEOMETRY_HEADER_SIZE ); - geometryHeader.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static ArrayValue decodeGeometryArray( ContextMap ic, GeometryHeader header, byte[] data ) - { - return find( header.geometryType ).decodeArray( ic, header, data ); - } - - private final int gtype; - private final String name; - - GeometryType( int gtype, String name ) - { - this.gtype = gtype; - this.name = name; - } - - public abstract Value decode( CoordinateReferenceSystem crs, int dimension, long[] valueBlocks, int offset ); - - public abstract int calculateNumberOfBlocksUsedForGeometry( long firstBlock ); - - public abstract ArrayValue decodeArray( ContextMap ic, GeometryHeader header, byte[] data ); - - public int getGtype() - { - return gtype; - } - - public String getName() - { - return name; - } -} diff --git a/src/blob/java/org/neo4j/kernel/impl/store/TemporalType.java b/src/blob/java/org/neo4j/kernel/impl/store/TemporalType.java deleted file mode 100644 index 2d7b0b7a..00000000 --- a/src/blob/java/org/neo4j/kernel/impl/store/TemporalType.java +++ /dev/null @@ -1,721 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.kernel.impl.store; - -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.OffsetTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoUnit; -import java.util.HashMap; -import java.util.Map; - -import cn.graiph.util.ContextMap; -import org.neo4j.helpers.collection.Pair; -import org.neo4j.kernel.impl.store.format.standard.StandardFormatSettings; -import org.neo4j.kernel.impl.store.record.PropertyBlock; -import org.neo4j.values.storable.ArrayValue; -import org.neo4j.values.storable.DateTimeValue; -import org.neo4j.values.storable.DateValue; -import org.neo4j.values.storable.DurationValue; -import org.neo4j.values.storable.LocalDateTimeValue; -import org.neo4j.values.storable.LocalTimeValue; -import org.neo4j.values.storable.LongArray; -import org.neo4j.values.storable.TimeValue; -import org.neo4j.values.storable.TimeZones; -import org.neo4j.values.storable.Value; -import org.neo4j.values.storable.Values; -import org.neo4j.values.utils.TemporalUtil; - -import static java.time.ZoneOffset.UTC; - -/** - * For the PropertyStore format, check {@link PropertyStore}. - * For the array format, check {@link DynamicArrayStore}. 
- */ -public enum TemporalType -{ - TEMPORAL_INVALID( 0, "Invalid" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - throw new UnsupportedOperationException( "Cannot decode invalid temporal" ); - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return PropertyType.BLOCKS_USED_FOR_BAD_TYPE_OR_ENCODING; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - throw new UnsupportedOperationException( "Cannot decode invalid temporal array" ); - } - }, - TEMPORAL_DATE( 1, "Date" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - long epochDay = valueIsInlined( valueBlocks[offset] ) ? valueBlocks[offset] >>> 33 : valueBlocks[1 + offset]; - return DateValue.epochDate( epochDay ); - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return valueIsInlined( firstBlock ) ? BLOCKS_LONG_INLINED : BLOCKS_LONG_NOT_INLINED; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - if ( dataValue instanceof LongArray ) - { - LongArray numbers = (LongArray) dataValue; - LocalDate[] dates = new LocalDate[numbers.length()]; - for ( int i = 0; i < dates.length; i++ ) - { - dates[i] = LocalDate.ofEpochDay( numbers.longValue( i ) ); - } - return Values.dateArray( dates ); - } - else - { - throw new InvalidRecordException( "Array with unexpected type. Actual:" - + dataValue.getClass().getSimpleName() + ". Expected: LongArray." ); - } - } - - private boolean valueIsInlined( long firstBlock ) - { - // [][][][i][ssss,tttt][kkkk,kkkk][kkkk,kkkk][kkkk,kkkk] - return (firstBlock & 0x100000000L) > 0; - } - }, - TEMPORAL_LOCAL_TIME( 2, "LocalTime" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - long nanoOfDay = valueIsInlined( valueBlocks[offset] ) ? valueBlocks[offset] >>> 33 : valueBlocks[1 + offset]; - checkValidNanoOfDay( nanoOfDay ); - return LocalTimeValue.localTime( nanoOfDay ); - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return valueIsInlined( firstBlock ) ? BLOCKS_LONG_INLINED : BLOCKS_LONG_NOT_INLINED; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - if ( dataValue instanceof LongArray ) - { - LongArray numbers = (LongArray) dataValue; - LocalTime[] times = new LocalTime[numbers.length()]; - for ( int i = 0; i < times.length; i++ ) - { - long nanoOfDay = numbers.longValue( i ); - checkValidNanoOfDay( nanoOfDay ); - times[i] = LocalTime.ofNanoOfDay( nanoOfDay ); - } - return Values.localTimeArray( times ); - } - else - { - throw new InvalidRecordException( "Array with unexpected type. Actual:" - + dataValue.getClass().getSimpleName() + ". Expected: LongArray." 
); - } - } - - private boolean valueIsInlined( long firstBlock ) - { - // [][][][i][ssss,tttt][kkkk,kkkk][kkkk,kkkk][kkkk,kkkk] - return (firstBlock & 0x100000000L) > 0; - } - }, - TEMPORAL_LOCAL_DATE_TIME( 3, "LocalDateTime" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - long nanoOfSecond = valueBlocks[offset] >>> 32; - checkValidNanoOfSecond( nanoOfSecond ); - long epochSecond = valueBlocks[1 + offset]; - return LocalDateTimeValue.localDateTime( epochSecond, nanoOfSecond ); - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return BLOCKS_LOCAL_DATETIME; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - if ( dataValue instanceof LongArray ) - { - LongArray numbers = (LongArray) dataValue; - LocalDateTime[] dateTimes = new LocalDateTime[numbers.length() / BLOCKS_LOCAL_DATETIME]; - for ( int i = 0; i < dateTimes.length; i++ ) - { - long epochSecond = numbers.longValue( i * BLOCKS_LOCAL_DATETIME ); - long nanoOfSecond = numbers.longValue( i * BLOCKS_LOCAL_DATETIME + 1 ); - checkValidNanoOfSecond( nanoOfSecond ); - dateTimes[i] = LocalDateTime.ofInstant( - Instant.ofEpochSecond( epochSecond, nanoOfSecond ), - UTC ); - } - return Values.localDateTimeArray( dateTimes ); - } - else - { - throw new InvalidRecordException( "Array with unexpected type. Actual:" - + dataValue.getClass().getSimpleName() + ". Expected: LongArray." ); - } - } - }, - TEMPORAL_TIME( 4, "Time" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - int secondOffset = (int) (valueBlocks[offset] >>> 32); - long nanoOfDay = valueBlocks[1 + offset]; - checkValidNanoOfDayWithOffset( nanoOfDay, secondOffset ); - return TimeValue.time( nanoOfDay, ZoneOffset.ofTotalSeconds( secondOffset ) ); - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return BLOCKS_TIME; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - if ( dataValue instanceof LongArray ) - { - LongArray numbers = (LongArray) dataValue; - OffsetTime[] times = new OffsetTime[(int) (numbers.length() / BLOCKS_TIME)]; - for ( int i = 0; i < times.length; i++ ) - { - long nanoOfDay = numbers.longValue( i * BLOCKS_TIME ); - int secondOffset = (int) numbers.longValue( i * BLOCKS_TIME + 1 ); - checkValidNanoOfDay( nanoOfDay ); - times[i] = OffsetTime.of( LocalTime.ofNanoOfDay( nanoOfDay ), ZoneOffset.ofTotalSeconds( secondOffset ) ); - } - return Values.timeArray( times ); - } - else - { - throw new InvalidRecordException( "Array with unexpected type. Actual:" - + dataValue.getClass().getSimpleName() + ". Expected: LongArray." 
); - } - } - }, - TEMPORAL_DATE_TIME( 5, "DateTime" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - if ( storingZoneOffset( valueBlocks[offset] ) ) - { - int nanoOfSecond = (int) (valueBlocks[offset] >>> 33); - checkValidNanoOfSecond( nanoOfSecond ); - long epochSecond = valueBlocks[1 + offset]; - int secondOffset = (int) valueBlocks[2 + offset]; - return DateTimeValue.datetime( epochSecond, nanoOfSecond, ZoneOffset.ofTotalSeconds( secondOffset ) ); - } - else - { - int nanoOfSecond = (int) (valueBlocks[offset] >>> 33); - checkValidNanoOfSecond( nanoOfSecond ); - long epochSecond = valueBlocks[1 + offset]; - short zoneNumber = (short) valueBlocks[2 + offset]; - return DateTimeValue.datetime( epochSecond, nanoOfSecond, - ZoneId.of( TimeZones.map( zoneNumber ) ) ); - } - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return BLOCKS_DATETIME; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - if ( dataValue instanceof LongArray ) - { - LongArray numbers = (LongArray) dataValue; - ZonedDateTime[] dateTimes = new ZonedDateTime[numbers.length() / BLOCKS_DATETIME]; - for ( int i = 0; i < dateTimes.length; i++ ) - { - long epochSecond = numbers.longValue( i * BLOCKS_DATETIME ); - long nanoOfSecond = numbers.longValue( i * BLOCKS_DATETIME + 1 ); - checkValidNanoOfSecond( nanoOfSecond ); - long zoneValue = numbers.longValue( i * BLOCKS_DATETIME + 2 ); - if ( (zoneValue & 1) == 1 ) - { - int secondOffset = (int) (zoneValue >>> 1); - dateTimes[i] = - ZonedDateTime.ofInstant( Instant.ofEpochSecond( epochSecond, nanoOfSecond ), - ZoneOffset.ofTotalSeconds( secondOffset ) ); - } - else - { - short zoneNumber = (short) (zoneValue >>> 1); - dateTimes[i] = ZonedDateTime.ofInstant( Instant.ofEpochSecond( epochSecond, nanoOfSecond ), - ZoneId.of( TimeZones.map( zoneNumber ) ) ); - } - } - return Values.dateTimeArray( dateTimes ); - } - else - { - throw new InvalidRecordException( - "LocalTime array with unexpected type. Actual:" + dataValue.getClass().getSimpleName() + ". Expected: LongArray." ); - } - } - - private boolean storingZoneOffset( long firstBlock ) - { - // [][][][i][ssss,tttt][kkkk,kkkk][kkkk,kkkk][kkkk,kkkk] - return (firstBlock & 0x100000000L) > 0; - } - }, - TEMPORAL_DURATION( 6, "Duration" ) - { - @Override - public Value decodeForTemporal( long[] valueBlocks, int offset ) - { - int nanos = (int) (valueBlocks[offset] >>> 32); - long months = valueBlocks[1 + offset]; - long days = valueBlocks[2 + offset]; - long seconds = valueBlocks[3 + offset]; - return DurationValue.duration( months, days, seconds, nanos ); - } - - @Override - public int calculateNumberOfBlocksUsedForTemporal( long firstBlock ) - { - return BLOCKS_DURATION; - } - - @Override - public ArrayValue decodeArray( Value dataValue ) - { - if ( dataValue instanceof LongArray ) - { - LongArray numbers = (LongArray) dataValue; - DurationValue[] durations = new DurationValue[(int) (numbers.length() / BLOCKS_DURATION)]; - for ( int i = 0; i < durations.length; i++ ) - { - durations[i] = DurationValue.duration( numbers.longValue( i * BLOCKS_DURATION ), numbers.longValue( i * BLOCKS_DURATION + 1 ), - numbers.longValue( i * BLOCKS_DURATION + 2 ), numbers.longValue( i * BLOCKS_DURATION + 3 ) ); - } - return Values.durationArray( durations ); - } - else - { - throw new InvalidRecordException( "Array with unexpected type. Actual:" + dataValue.getClass().getSimpleName() + ". Expected: LongArray." 
); - } - } - }; - - private static final int BLOCKS_LONG_INLINED = 1; - private static final int BLOCKS_LONG_NOT_INLINED = 2; - private static final int BLOCKS_LOCAL_DATETIME = 2; - private static final int BLOCKS_TIME = 2; - private static final int BLOCKS_DATETIME = 3; - private static final int BLOCKS_DURATION = 4; - - /** - * Handler for header information for Temporal objects and arrays of Temporal objects - */ - public static class TemporalHeader - { - private final int temporalType; - - private TemporalHeader( int temporalType ) - { - this.temporalType = temporalType; - } - - private void writeArrayHeaderTo( byte[] bytes ) - { - bytes[0] = (byte) PropertyType.TEMPORAL.intValue(); - bytes[1] = (byte) temporalType; - } - - static TemporalHeader fromArrayHeaderBytes( byte[] header ) - { - int temporalType = Byte.toUnsignedInt( header[1] ); - return new TemporalHeader( temporalType ); - } - - public static TemporalHeader fromArrayHeaderByteBuffer( ByteBuffer buffer ) - { - int temporalType = Byte.toUnsignedInt( buffer.get() ); - return new TemporalHeader( temporalType ); - } - } - - private static final TemporalType[] TYPES = TemporalType.values(); - private static final Map all = new HashMap<>( TYPES.length ); - - static - { - for ( TemporalType temporalType : TYPES ) - { - all.put( temporalType.name, temporalType ); - } - } - - private static final long TEMPORAL_TYPE_MASK = 0x00000000F0000000L; - - private static int getTemporalType( long firstBlock ) - { - return (int) ((firstBlock & TEMPORAL_TYPE_MASK) >> 28); - } - - public static int calculateNumberOfBlocksUsed( long firstBlock ) - { - TemporalType geometryType = find( getTemporalType( firstBlock ) ); - return geometryType.calculateNumberOfBlocksUsedForTemporal( firstBlock ); - } - - private static TemporalType find( int temporalType ) - { - if ( temporalType < TYPES.length && temporalType >= 0 ) - { - return TYPES[temporalType]; - } - else - { - // Kernel code requires no exceptions in deeper PropertyChain processing of corrupt/invalid data - return TEMPORAL_INVALID; - } - } - - /** - * Any out of range value means a store corruption - */ - private static void checkValidNanoOfDay( long nanoOfDay ) - { - if ( nanoOfDay > LocalTime.MAX.toNanoOfDay() || nanoOfDay < LocalTime.MIN.toNanoOfDay() ) - { - throw new InvalidRecordException( "Nanosecond of day out of range:" + nanoOfDay ); - } - } - - /** - * Any out of range value means a store corruption - */ - private static void checkValidNanoOfDayWithOffset( long nanoOfDayUTC, int secondOffset ) - { - long nanoOfDay = TemporalUtil.nanosOfDayToLocal( nanoOfDayUTC, secondOffset ); - checkValidNanoOfDay( nanoOfDay ); - } - - /** - * Any out of range value means a store corruption - */ - private static void checkValidNanoOfSecond( long nanoOfSecond ) - { - if ( nanoOfSecond > 999_999_999 || nanoOfSecond < 0 ) - { - throw new InvalidRecordException( "Nanosecond of second out of range:" + nanoOfSecond ); - } - } - - public static Value decode( PropertyBlock block ) - { - return decode( block.getValueBlocks(), 0 ); - } - - public static Value decode( long[] valueBlocks, int offset ) - { - long firstBlock = valueBlocks[offset]; - int temporalType = getTemporalType( firstBlock ); - return find( temporalType ).decodeForTemporal( valueBlocks, offset ); - } - - public static long[] encodeDate( int keyId, long epochDay ) - { - return encodeLong( keyId, epochDay, TemporalType.TEMPORAL_DATE.temporalType ); - } - - public static long[] encodeLocalTime( int keyId, long nanoOfDay ) - { - return 
encodeLong( keyId, nanoOfDay, TemporalType.TEMPORAL_LOCAL_TIME.temporalType ); - } - - private static long[] encodeLong( int keyId, long val, int temporalType ) - { - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - - long keyAndType = keyId | (((long) (PropertyType.TEMPORAL.intValue()) << idBits)); - long temporalTypeBits = temporalType << (idBits + 4); - - long[] data; - if ( ShortArray.LONG.getRequiredBits( val ) <= 64 - 33 ) - { // We only need one block for this value - data = new long[BLOCKS_LONG_INLINED]; - data[0] = keyAndType | temporalTypeBits | (1L << 32) | (val << 33); - } - else - { // We need two blocks for this value - data = new long[BLOCKS_LONG_NOT_INLINED]; - data[0] = keyAndType | temporalTypeBits; - data[1] = val; - } - - return data; - } - - public static long[] encodeLocalDateTime( int keyId, long epochSecond, long nanoOfSecond ) - { - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - - long keyAndType = keyId | (((long) (PropertyType.TEMPORAL.intValue()) << idBits)); - long temporalTypeBits = TemporalType.TEMPORAL_LOCAL_DATE_TIME.temporalType << (idBits + 4); - - long[] data = new long[BLOCKS_LOCAL_DATETIME]; - // nanoOfSecond will never require more than 30 bits - data[0] = keyAndType | temporalTypeBits | (nanoOfSecond << 32); - data[1] = epochSecond; - - return data; - } - - public static long[] encodeDateTime( int keyId, long epochSecond, long nanoOfSecond, String zoneId ) - { - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - short zoneNumber = TimeZones.map( zoneId ); - - long keyAndType = keyId | (((long) (PropertyType.TEMPORAL.intValue()) << idBits)); - long temporalTypeBits = TemporalType.TEMPORAL_DATE_TIME.temporalType << (idBits + 4); - - long[] data = new long[BLOCKS_DATETIME]; - // nanoOfSecond will never require more than 30 bits - data[0] = keyAndType | temporalTypeBits | (nanoOfSecond << 33); - data[1] = epochSecond; - data[2] = zoneNumber; - - return data; - } - - public static long[] encodeDateTime( int keyId, long epochSecond, long nanoOfSecond, int secondOffset ) - { - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - - long keyAndType = keyId | (((long) (PropertyType.TEMPORAL.intValue()) << idBits)); - long temporalTypeBits = TemporalType.TEMPORAL_DATE_TIME.temporalType << (idBits + 4); - - long[] data = new long[BLOCKS_DATETIME]; - // nanoOfSecond will never require more than 30 bits - data[0] = keyAndType | temporalTypeBits | (1L << 32) | (nanoOfSecond << 33); - data[1] = epochSecond; - data[2] = secondOffset; - - return data; - } - - public static long[] encodeTime( int keyId, long nanoOfDayUTC, int secondOffset ) - { - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - - long keyAndType = keyId | (((long) (PropertyType.TEMPORAL.intValue()) << idBits)); - long temporalTypeBits = TemporalType.TEMPORAL_TIME.temporalType << (idBits + 4); - - long[] data = new long[BLOCKS_TIME]; - // Offset are always in the range +-18:00, so secondOffset will never require more than 17 bits - data[0] = keyAndType | temporalTypeBits | ((long) secondOffset << 32); - data[1] = nanoOfDayUTC; - - return data; - } - - public static long[] encodeDuration( int keyId, long months, long days, long seconds, int nanos ) - { - int idBits = StandardFormatSettings.PROPERTY_TOKEN_MAXIMUM_ID_BITS; - - long keyAndType = keyId | (((long) (PropertyType.TEMPORAL.intValue()) << idBits)); - long temporalTypeBits = TemporalType.TEMPORAL_DURATION.temporalType << (idBits + 4); - - long[] 
data = new long[BLOCKS_DURATION]; - data[0] = keyAndType | temporalTypeBits | ((long) nanos << 32); - data[1] = months; - data[2] = days; - data[3] = seconds; - - return data; - } - - public static byte[] encodeDateArray( LocalDate[] dates ) - { - long[] data = new long[dates.length]; - for ( int i = 0; i < data.length; i++ ) - { - data[i] = dates[i].toEpochDay(); - } - TemporalHeader header = new TemporalHeader( TemporalType.TEMPORAL_DATE.temporalType ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.TEMPORAL_HEADER_SIZE ); - header.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static byte[] encodeLocalTimeArray( LocalTime[] times ) - { - long[] data = new long[times.length]; - for ( int i = 0; i < data.length; i++ ) - { - data[i] = times[i].toNanoOfDay(); - } - TemporalHeader header = new TemporalHeader( TemporalType.TEMPORAL_LOCAL_TIME.temporalType ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.TEMPORAL_HEADER_SIZE ); - header.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static byte[] encodeLocalDateTimeArray( LocalDateTime[] dateTimes ) - { - long[] data = new long[dateTimes.length * BLOCKS_LOCAL_DATETIME]; - for ( int i = 0; i < dateTimes.length; i++ ) - { - data[i * BLOCKS_LOCAL_DATETIME] = dateTimes[i].toEpochSecond( UTC ); - data[i * BLOCKS_LOCAL_DATETIME + 1] = dateTimes[i].getNano(); - } - TemporalHeader header = new TemporalHeader( TemporalType.TEMPORAL_LOCAL_DATE_TIME.temporalType ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.TEMPORAL_HEADER_SIZE ); - header.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static byte[] encodeTimeArray( OffsetTime[] times ) - { - // We could store this in dateTimes.length * 1.5 if we wanted - long[] data = new long[(int) (Math.ceil( times.length * BLOCKS_TIME ))]; - int i; - for ( i = 0; i < times.length; i++ ) - { - data[i * BLOCKS_TIME] = times[i].toLocalTime().toNanoOfDay(); - data[i * BLOCKS_TIME + 1] = times[i].getOffset().getTotalSeconds(); - } - - TemporalHeader header = new TemporalHeader( TemporalType.TEMPORAL_TIME.temporalType ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.TEMPORAL_HEADER_SIZE ); - header.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static byte[] encodeDateTimeArray( ZonedDateTime[] dateTimes ) - { - // We could store this in dateTimes.length * 2.5 if we wanted - long[] data = new long[dateTimes.length * BLOCKS_DATETIME]; - - int i; - for ( i = 0; i < dateTimes.length; i++ ) - { - data[i * BLOCKS_DATETIME] = dateTimes[i].toEpochSecond(); - data[i * BLOCKS_DATETIME + 1] = dateTimes[i].getNano(); - if ( dateTimes[i].getZone() instanceof ZoneOffset ) - { - ZoneOffset offset = (ZoneOffset) dateTimes[i].getZone(); - int secondOffset = offset.getTotalSeconds(); - // Set lowest bit to 1 means offset - data[i * BLOCKS_DATETIME + 2] = secondOffset << 1 | 1L; - } - else - { - String timeZoneId = dateTimes[i].getZone().getId(); - short zoneNumber = TimeZones.map( timeZoneId ); - // Set lowest bit to 0 means zone id - data[i * BLOCKS_DATETIME + 2] = zoneNumber << 1; - } - } - - TemporalHeader header = new TemporalHeader( TemporalType.TEMPORAL_DATE_TIME.temporalType ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.TEMPORAL_HEADER_SIZE ); - header.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static byte[] encodeDurationArray( DurationValue[] durations ) - { - long[] data = new long[durations.length * 
BLOCKS_DURATION]; - for ( int i = 0; i < durations.length; i++ ) - { - data[i * BLOCKS_DURATION] = durations[i].get( ChronoUnit.MONTHS ); - data[i * BLOCKS_DURATION + 1] = durations[i].get( ChronoUnit.DAYS ); - data[i * BLOCKS_DURATION + 2] = durations[i].get( ChronoUnit.SECONDS ); - data[i * BLOCKS_DURATION + 3] = durations[i].get( ChronoUnit.NANOS ); - } - TemporalHeader header = new TemporalHeader( TemporalType.TEMPORAL_DURATION.temporalType ); - byte[] bytes = DynamicArrayStore.encodeFromNumbers( data, DynamicArrayStore.TEMPORAL_HEADER_SIZE ); - header.writeArrayHeaderTo( bytes ); - return bytes; - } - - public static ArrayValue decodeTemporalArray( ContextMap ic, TemporalHeader header, byte[] data ) - { - byte[] dataHeader = PropertyType.ARRAY.readDynamicRecordHeader( data ); - byte[] dataBody = new byte[data.length - dataHeader.length]; - System.arraycopy( data, dataHeader.length, dataBody, 0, dataBody.length ); - Value dataValue = DynamicArrayStore.getRightArray( ic, Pair.of( dataHeader, dataBody ) ); - return find( header.temporalType ).decodeArray( dataValue ); - } - - private final int temporalType; - private final String name; - - TemporalType( int temporalType, String name ) - { - this.temporalType = temporalType; - this.name = name; - } - - public abstract Value decodeForTemporal( long[] valueBlocks, int offset ); - - public abstract int calculateNumberOfBlocksUsedForTemporal( long firstBlock ); - - public abstract ArrayValue decodeArray( Value dataValue ); - - public String getName() - { - return name; - } -} diff --git a/src/blob/scala/org/neo4j/kernel/impl/InstanceContext.scala b/src/blob/scala/org/neo4j/kernel/impl/InstanceContext.scala deleted file mode 100644 index f4fa62bb..00000000 --- a/src/blob/scala/org/neo4j/kernel/impl/InstanceContext.scala +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -package org.neo4j.kernel.impl - -import cn.graiph.util.{ContextMap, ConfigUtils, Configuration, ReflectUtils} -import ReflectUtils._ -import org.neo4j.kernel.configuration.Config -import org.neo4j.kernel.impl.factory.GraphDatabaseFacade -import org.neo4j.kernel.impl.store.id.RenewableBatchIdSequence -import org.neo4j.kernel.impl.store.record.{PrimitiveRecord, PropertyRecord} -import org.neo4j.kernel.impl.store.{CommonAbstractStore, NeoStores, StandardDynamicRecordAllocator} -import org.neo4j.kernel.impl.transaction.state.RecordAccess - -import scala.collection.mutable.{Map => MMap} - -/** - * Created by bluejoe on 2019/4/16. 
- */ -object InstanceContext { - @deprecated("warning: no InstanceContext") - val none: ContextMap = new ContextMap(); - - def of(o: AnyRef, path: String): ContextMap = of(o._get(path)); - - def of(o: AnyRef): ContextMap = o match { - case x: StandardDynamicRecordAllocator => - of(x._get("idGenerator")); - - case x: NeoStores => - x._get("config").asInstanceOf[Config].getInstanceContext; - - case x: Config => - x.getInstanceContext; - - case x: CommonAbstractStore[_, _] => - x._get("configuration").asInstanceOf[Config].getInstanceContext; - - case x: RecordAccess[PropertyRecord, PrimitiveRecord] => - x._get("loader.val$store.configuration").asInstanceOf[Config].getInstanceContext; - - case x: RenewableBatchIdSequence => - of(x._get("source")); - - case x: GraphDatabaseFacade => - x._get("config").asInstanceOf[Config].getInstanceContext; - - case _ => - throw new FaileToGetInstanceContextException(o); - } - -} - -class FaileToGetInstanceContextException(o: AnyRef) extends RuntimeException { - -} - -object Neo4jConfigUtils { - implicit def neo4jConfig2Config(neo4jConf: Config) = new Configuration() { - override def getRaw(name: String): Option[String] = { - val raw = neo4jConf.getRaw(name); - if (raw.isPresent) { - Some(raw.get()) - } - else { - None - } - } - } - - implicit def neo4jConfig2Ex(neo4jConf: Config) = ConfigUtils.config2Ex(neo4jConfig2Config(neo4jConf)); -} \ No newline at end of file diff --git a/src/blob/scala/org/neo4j/kernel/impl/blob/BlobPropertyStoreService.scala b/src/blob/scala/org/neo4j/kernel/impl/blob/BlobPropertyStoreService.scala deleted file mode 100644 index 0aecc88c..00000000 --- a/src/blob/scala/org/neo4j/kernel/impl/blob/BlobPropertyStoreService.scala +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.kernel.impl.blob - -import java.io.File - -import cn.graiph.util.{ContextMap, Configuration, Logging} -import org.neo4j.graphdb.factory.GraphDatabaseSettings -import org.neo4j.kernel.configuration.Config -import org.neo4j.kernel.impl.Neo4jConfigUtils._ -import org.neo4j.kernel.impl.factory.DatabaseInfo -import org.neo4j.kernel.impl.proc.Procedures -import org.neo4j.kernel.lifecycle.Lifecycle - -import scala.collection.mutable.ArrayBuffer - -trait BlobPropertyStoreServicePlugin { - def init(ctx: BlobPropertyStoreServiceContext): Unit; - - def start(ctx: BlobPropertyStoreServiceContext): Unit; - - def stop(ctx: BlobPropertyStoreServiceContext): Unit; -} - -object BlobPropertyStoreServicePlugins extends Logging { - val plugins = ArrayBuffer[BlobPropertyStoreServicePlugin]( - new BlobStoragePlugin(), - new DefaultBlobFunctionsPlugin() - ); - - def add(plugin: BlobPropertyStoreServicePlugin) = plugins += plugin; - - def init(ctx: BlobPropertyStoreServiceContext): Unit = { - plugins.foreach { x => - x.init(ctx) - logger.debug(s"plugin initialized: $x"); - } - } - - def start(ctx: BlobPropertyStoreServiceContext): Unit = { - plugins.foreach { - _.start(ctx) - } - } - - def stop(ctx: BlobPropertyStoreServiceContext): Unit = { - plugins.foreach { - _.stop(ctx) - } - } -} - -class BlobStoragePlugin extends BlobPropertyStoreServicePlugin with Logging { - var blobStorage: BlobStorage = _; - - override def init(ctx: BlobPropertyStoreServiceContext): Unit = { - blobStorage = BlobStorage.create(ctx.configuration); - ctx.instanceContext.put[BlobStorage](blobStorage); - } - - override def stop(ctx: BlobPropertyStoreServiceContext): Unit = { - blobStorage.disconnect(); - logger.info(s"blob storage disconnected: $blobStorage"); - } - - override def start(ctx: BlobPropertyStoreServiceContext): Unit = { - blobStorage.initialize(new File(ctx.storeDir, - ctx.neo4jConf.get(GraphDatabaseSettings.active_database)), - ctx.configuration); - } -} - -class DefaultBlobFunctionsPlugin extends BlobPropertyStoreServicePlugin with Logging { - override def init(ctx: BlobPropertyStoreServiceContext): Unit = { - registerProcedure(ctx.proceduresService, classOf[DefaultBlobFunctions]); - } - - private def registerProcedure(proceduresService: Procedures, procedures: Class[_]*) { - for (procedure <- procedures) { - proceduresService.registerProcedure(procedure); - proceduresService.registerFunction(procedure); - } - } - - override def stop(ctx: BlobPropertyStoreServiceContext): Unit = { - } - - override def start(ctx: BlobPropertyStoreServiceContext): Unit = { - } -} - -case class BlobPropertyStoreServiceContext(proceduresService: Procedures, storeDir: File, neo4jConf: Config, databaseInfo: DatabaseInfo, configuration: Configuration, instanceContext: ContextMap) { - -} - -class BlobPropertyStoreService(proceduresService: Procedures, storeDir: File, neo4jConf: Config, databaseInfo: DatabaseInfo) - extends Lifecycle with Logging { - - val configuration: Configuration = neo4jConf; - val ctx = new BlobPropertyStoreServiceContext(proceduresService, storeDir, neo4jConf, databaseInfo, configuration, neo4jConf.getInstanceContext); - - /////binds context - neo4jConf.getInstanceContext.put[BlobPropertyStoreService](this); - - override def shutdown(): Unit = { - } - - override def init(): Unit = { - BlobPropertyStoreServicePlugins.init(ctx); - } - - override def stop(): Unit = { - BlobPropertyStoreServicePlugins.stop(ctx); - } - - override def start(): Unit = { - BlobPropertyStoreServicePlugins.start(ctx); - } -} \ No 
newline at end of file diff --git a/src/blob/scala/org/neo4j/server/AbstractNeoServer.java b/src/blob/scala/org/neo4j/server/AbstractNeoServer.java deleted file mode 100644 index 16037ad2..00000000 --- a/src/blob/scala/org/neo4j/server/AbstractNeoServer.java +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -package org.neo4j.server; - -import org.apache.commons.configuration.Configuration; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URI; -import java.time.Clock; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.function.Supplier; -import java.util.regex.Pattern; - -import org.neo4j.graphdb.DependencyResolver; -import org.neo4j.graphdb.facade.GraphDatabaseFacadeFactory.Dependencies; -import org.neo4j.helpers.AdvertisedSocketAddress; -import org.neo4j.helpers.ListenSocketAddress; -import org.neo4j.helpers.RunCarefully; -import org.neo4j.internal.diagnostics.DiagnosticsManager; -import org.neo4j.io.fs.FileSystemAbstraction; -import org.neo4j.kernel.GraphDatabaseQueryService; -import org.neo4j.kernel.api.security.AuthManager; -import org.neo4j.kernel.api.security.UserManagerSupplier; -import org.neo4j.kernel.availability.AvailabilityGuard; -import org.neo4j.kernel.configuration.Config; -import org.neo4j.kernel.configuration.ConnectorPortRegister; -import org.neo4j.kernel.configuration.HttpConnector; -import org.neo4j.kernel.configuration.HttpConnector.Encryption; -import org.neo4j.kernel.configuration.ssl.SslPolicyLoader; -import org.neo4j.kernel.impl.query.QueryExecutionEngine; -import org.neo4j.kernel.internal.Version; -import org.neo4j.kernel.lifecycle.LifeSupport; -import org.neo4j.kernel.lifecycle.LifecycleAdapter; -import org.neo4j.logging.Log; -import org.neo4j.logging.LogProvider; -import org.neo4j.scheduler.Group; -import org.neo4j.scheduler.JobScheduler; -import org.neo4j.server.configuration.ServerSettings; -import org.neo4j.server.database.CypherExecutor; -import org.neo4j.server.database.CypherExecutorProvider; -import org.neo4j.server.database.Database; -import org.neo4j.server.database.DatabaseProvider; -import org.neo4j.server.database.GraphDatabaseServiceProvider; -import org.neo4j.server.database.GraphFactory; -import org.neo4j.server.database.InjectableProvider; -import org.neo4j.server.database.LifecycleManagingDatabase; -import org.neo4j.server.modules.RESTApiModule; -import org.neo4j.server.modules.ServerModule; -import org.neo4j.server.plugins.ConfigAdapter; -import org.neo4j.server.plugins.PluginInvocatorProvider; -import org.neo4j.server.plugins.PluginManager; -import org.neo4j.server.rest.repr.InputFormatProvider; -import org.neo4j.server.rest.repr.OutputFormatProvider; -import org.neo4j.server.rest.repr.RepresentationFormatRepository; 
-import org.neo4j.server.rest.transactional.TransactionFacade; -import org.neo4j.server.rest.transactional.TransactionFilter; -import org.neo4j.server.rest.transactional.TransactionHandleRegistry; -import org.neo4j.server.rest.transactional.TransactionRegistry; -import org.neo4j.server.rest.transactional.TransitionalPeriodTransactionMessContainer; -import org.neo4j.server.rest.web.DatabaseActions; -import org.neo4j.server.web.AsyncRequestLog; -import org.neo4j.server.web.SimpleUriBuilder; -import org.neo4j.server.web.WebServer; -import org.neo4j.server.web.WebServerProvider; -import org.neo4j.ssl.SslPolicy; -import org.neo4j.time.Clocks; -import org.neo4j.udc.UsageData; - -import static java.lang.Math.round; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.neo4j.graphdb.factory.GraphDatabaseSettings.db_timezone; -import static org.neo4j.helpers.collection.Iterables.map; -import static org.neo4j.server.configuration.ServerSettings.http_log_path; -import static org.neo4j.server.configuration.ServerSettings.http_logging_enabled; -import static org.neo4j.server.configuration.ServerSettings.http_logging_rotation_keep_number; -import static org.neo4j.server.configuration.ServerSettings.http_logging_rotation_size; -import static org.neo4j.server.database.InjectableProvider.providerForSingleton; -import static org.neo4j.server.database.InjectableProvider.providerFromSupplier; -import static org.neo4j.server.exception.ServerStartupErrors.translateToServerStartupError; - -public abstract class AbstractNeoServer implements NeoServer -{ - private static final long MINIMUM_TIMEOUT = 1000L; - /** - * We add a second to the timeout if the user configures a 1-second timeout. - *
<p>
- * This ensures the expiry time displayed to the user is always at least 1 second, even after it is rounded down. - */ - private static final long ROUNDING_SECOND = 1000L; - - private static final Pattern[] DEFAULT_URI_WHITELIST = new Pattern[]{ - Pattern.compile( "/browser.*" ), - Pattern.compile( "/" ) - }; - public static String NEO4J_IS_STARTING_MESSAGE = "======== Neo4j " + Version.getNeo4jVersion() + " ========"; - - protected final LogProvider userLogProvider; - private final Log log; - - private final List serverModules = new ArrayList<>(); - private final SimpleUriBuilder uriBuilder = new SimpleUriBuilder(); - private final Config config; - private final LifeSupport life = new LifeSupport(); - private final ListenSocketAddress httpListenAddress; - private final ListenSocketAddress httpsListenAddress; - private AdvertisedSocketAddress httpAdvertisedAddress; - private AdvertisedSocketAddress httpsAdvertisedAddress; - - protected final Database database; - private DependencyResolver dependencyResolver; - protected CypherExecutor cypherExecutor; - protected WebServer webServer; - protected Supplier authManagerSupplier; - protected Supplier userManagerSupplier; - protected Supplier sslPolicyFactorySupplier; - private DatabaseActions databaseActions; - private TransactionFacade transactionFacade; - - private TransactionHandleRegistry transactionRegistry; - private ConnectorPortRegister connectorPortRegister; - private HttpConnector httpConnector; - private HttpConnector httpsConnector; - private AsyncRequestLog requestLog; - private final Supplier availabilityGuardSupplier; - - protected abstract Iterable createServerModules(); - - protected abstract WebServer createWebServer(); - - public AbstractNeoServer( Config config, GraphFactory graphFactory, Dependencies dependencies ) - { - this.config = config; - this.userLogProvider = dependencies.userLogProvider(); - this.log = userLogProvider.getLog( getClass() ); - log.info( NEO4J_IS_STARTING_MESSAGE ); - - verifyConnectorsConfiguration( config ); - - httpConnector = findConnector( config, Encryption.NONE ); - httpListenAddress = listenAddressFor( config, httpConnector ); - httpAdvertisedAddress = advertisedAddressFor( config, httpConnector ); - - httpsConnector = findConnector( config, Encryption.TLS ); - httpsListenAddress = listenAddressFor( config, httpsConnector ); - httpsAdvertisedAddress = advertisedAddressFor( config, httpsConnector ); - - database = new LifecycleManagingDatabase( config, graphFactory, dependencies ); - this.availabilityGuardSupplier = ((LifecycleManagingDatabase) database)::getAvailabilityGuard; - life.add( database ); - life.add( new ServerDependenciesLifeCycleAdapter() ); - life.add( new ServerComponentsLifecycleAdapter() ); - } - - @Override - public void start() throws ServerStartupException - { - try - { - life.start(); - } - catch ( Throwable t ) - { - // If the database has been started, attempt to cleanly shut it down to avoid unclean shutdowns. 
- life.shutdown(); - - throw translateToServerStartupError( t ); - } - } - - public DependencyResolver getDependencyResolver() - { - return dependencyResolver; - } - - protected DatabaseActions createDatabaseActions() - { - return new DatabaseActions( database.getGraph() ); - } - - private TransactionFacade createTransactionalActions() - { - final long timeoutMillis = getTransactionTimeoutMillis(); - final Clock clock = Clocks.systemClock(); - - transactionRegistry = new TransactionHandleRegistry( clock, timeoutMillis, userLogProvider ); - - // ensure that this is > 0 - long runEvery = round( timeoutMillis / 2.0 ); - - resolveDependency( JobScheduler.class ).scheduleRecurring( Group.SERVER_TRANSACTION_TIMEOUT, () -> - { - long maxAge = clock.millis() - timeoutMillis; - transactionRegistry.rollbackSuspendedTransactionsIdleSince( maxAge ); - }, runEvery, MILLISECONDS ); - - return new TransactionFacade( - new TransitionalPeriodTransactionMessContainer( database.getGraph() ), - resolveDependency( QueryExecutionEngine.class ), - resolveDependency( GraphDatabaseQueryService.class ), - transactionRegistry, - userLogProvider - ); - } - - /** - * We are going to ensure the minimum timeout is 2 seconds. The timeout value is communicated to the user in - * seconds rounded down, meaning if a user set a 1 second timeout, he would be told there was less than 1 second - * remaining before he would need to renew the timeout. - */ - private long getTransactionTimeoutMillis() - { - final long timeout = config.get( ServerSettings.transaction_idle_timeout ).toMillis(); - return Math.max( timeout, MINIMUM_TIMEOUT + ROUNDING_SECOND ); - } - - /** - * Use this method to register server modules from subclasses - */ - protected final void registerModule( ServerModule module ) - { - serverModules.add( module ); - } - - private void startModules() - { - for ( ServerModule module : serverModules ) - { - module.start(); - } - } - - private void stopModules() - { - new RunCarefully( map( module -> module::stop, serverModules ) ).run(); - } - - private void clearModules() - { - serverModules.clear(); - } - - @Override - public Config getConfig() - { - return config; - } - - protected void configureWebServer() - { - webServer.setHttpAddress( httpListenAddress ); - webServer.setHttpsAddress( httpsListenAddress ); - webServer.setMaxThreads( config.get( ServerSettings.webserver_max_threads ) ); - webServer.setWadlEnabled( config.get( ServerSettings.wadl_enabled ) ); - webServer.setDefaultInjectables( createDefaultInjectables() ); - - String sslPolicyName = config.get( ServerSettings.ssl_policy ); - if ( sslPolicyName != null ) - { - SslPolicy sslPolicy = sslPolicyFactorySupplier.get().getPolicy( sslPolicyName ); - webServer.setSslPolicy( sslPolicy ); - } - } - - protected void startWebServer() throws Exception - { - try - { - setUpHttpLogging(); - webServer.start(); - registerHttpAddressAfterStartup(); - registerHttpsAddressAfterStartup(); - log.info( "Remote interface available at %s", baseUri() ); - } - catch ( Exception e ) - { - ListenSocketAddress address = httpListenAddress != null ? 
httpListenAddress : httpsListenAddress; - log.error( "Failed to start Neo4j on %s: %s", address, e.getMessage() ); - throw e; - } - } - - private void registerHttpAddressAfterStartup() - { - if ( httpConnector != null ) - { - InetSocketAddress localHttpAddress = webServer.getLocalHttpAddress(); - connectorPortRegister.register( httpConnector.key(), localHttpAddress ); - if ( httpAdvertisedAddress.getPort() == 0 ) - { - httpAdvertisedAddress = new AdvertisedSocketAddress( localHttpAddress.getHostString(), localHttpAddress.getPort() ); - } - } - } - - private void registerHttpsAddressAfterStartup() - { - if ( httpsConnector != null ) - { - InetSocketAddress localHttpsAddress = webServer.getLocalHttpsAddress(); - connectorPortRegister.register( httpsConnector.key(), localHttpsAddress ); - if ( httpsAdvertisedAddress.getPort() == 0 ) - { - httpsAdvertisedAddress = new AdvertisedSocketAddress( localHttpsAddress.getHostString(), localHttpsAddress.getPort() ); - } - } - } - - private void setUpHttpLogging() throws IOException - { - if ( !getConfig().get( http_logging_enabled ) ) - { - return; - } - - requestLog = new AsyncRequestLog( - dependencyResolver.resolveDependency( FileSystemAbstraction.class ), - config.get( db_timezone ).getZoneId(), - config.get( http_log_path ).toString(), - config.get( http_logging_rotation_size ), - config.get( http_logging_rotation_keep_number ) ); - webServer.setRequestLog( requestLog ); - } - - protected Pattern[] getUriWhitelist() - { - return DEFAULT_URI_WHITELIST; - } - - @Override - public void stop() - { - tryShutdownAvailabiltyGuard(); - life.stop(); - } - - private void tryShutdownAvailabiltyGuard() - { - AvailabilityGuard guard = availabilityGuardSupplier.get(); - if ( guard != null ) - { - guard.shutdown(); - } - } - - private void stopWebServer() throws Exception - { - if ( webServer != null ) - { - webServer.stop(); - } - if ( requestLog != null ) - { - requestLog.stop(); - } - } - - @Override - public Database getDatabase() - { - return database; - } - - @Override - public TransactionRegistry getTransactionRegistry() - { - return transactionRegistry; - } - - @Override - public URI baseUri() - { - return httpAdvertisedAddress != null - ? 
uriBuilder.buildURI( httpAdvertisedAddress, false ) - : uriBuilder.buildURI( httpsAdvertisedAddress, true ); - } - - public Optional httpsUri() - { - return Optional.ofNullable( httpsAdvertisedAddress ) - .map( address -> uriBuilder.buildURI( address, true ) ); - } - - public WebServer getWebServer() - { - return webServer; - } - - @Override - public PluginManager getExtensionManager() - { - RESTApiModule module = getModule( RESTApiModule.class ); - if ( module != null ) - { - return module.getPlugins(); - } - return null; - } - - protected Collection> createDefaultInjectables() - { - Collection> singletons = new ArrayList<>(); - - Database database = getDatabase(); - - singletons.add( new DatabaseProvider( database ) ); - singletons.add( new DatabaseActions.Provider( databaseActions ) ); - singletons.add( new GraphDatabaseServiceProvider( database ) ); - singletons.add( new NeoServerProvider( this ) ); - singletons.add( providerForSingleton( new ConfigAdapter( getConfig() ), Configuration.class ) ); - singletons.add( providerForSingleton( getConfig(), Config.class ) ); - - singletons.add( new WebServerProvider( getWebServer() ) ); - - PluginInvocatorProvider pluginInvocatorProvider = new PluginInvocatorProvider( this ); - singletons.add( pluginInvocatorProvider ); - RepresentationFormatRepository repository = new RepresentationFormatRepository( this ); - - singletons.add( new InputFormatProvider( repository ) ); - singletons.add( new OutputFormatProvider( repository ) ); - singletons.add( new CypherExecutorProvider( cypherExecutor ) ); - - singletons.add( providerForSingleton( transactionFacade, TransactionFacade.class ) ); - singletons.add( providerFromSupplier( authManagerSupplier, AuthManager.class ) ); - singletons.add( providerFromSupplier( userManagerSupplier, UserManagerSupplier.class ) ); - singletons.add( new TransactionFilter( database ) ); - singletons.add( new LoggingProvider( userLogProvider ) ); - singletons.add( providerForSingleton( userLogProvider.getLog( NeoServer.class ), Log.class ) ); - singletons.add( providerForSingleton( resolveDependency( UsageData.class ), UsageData.class ) ); - - return singletons; - } - - @SuppressWarnings( "unchecked" ) - private T getModule( Class clazz ) - { - for ( ServerModule sm : serverModules ) - { - if ( sm.getClass() == clazz ) - { - return (T) sm; - } - } - - return null; - } - - protected T resolveDependency( Class type ) - { - return dependencyResolver.resolveDependency( type ); - } - - private static void verifyConnectorsConfiguration( Config config ) - { - HttpConnector httpConnector = findConnector( config, Encryption.NONE ); - HttpConnector httpsConnector = findConnector( config, Encryption.TLS ); - - if ( httpConnector == null && httpsConnector == null ) - { - throw new IllegalArgumentException( "Either HTTP or HTTPS connector must be configured to run the server" ); - } - } - - private static HttpConnector findConnector( Config config, Encryption encryption ) - { - return config.enabledHttpConnectors() - .stream() - .filter( connector -> connector.encryptionLevel() == encryption ) - .findFirst() - .orElse( null ); - } - - private static ListenSocketAddress listenAddressFor( Config config, HttpConnector connector ) - { - return connector == null ? null : config.get( connector.listen_address ); - } - - private static AdvertisedSocketAddress advertisedAddressFor( Config config, HttpConnector connector ) - { - return connector == null ? 
null : config.get( connector.advertised_address ); - } - - private class ServerDependenciesLifeCycleAdapter extends LifecycleAdapter - { - @Override - public void start() - { - dependencyResolver = database.getGraph().getDependencyResolver(); - - authManagerSupplier = dependencyResolver.provideDependency( AuthManager.class ); - userManagerSupplier = dependencyResolver.provideDependency( UserManagerSupplier.class ); - sslPolicyFactorySupplier = dependencyResolver.provideDependency( SslPolicyLoader.class ); - webServer = createWebServer(); - - for ( ServerModule moduleClass : createServerModules() ) - { - registerModule( moduleClass ); - } - } - } - - private class ServerComponentsLifecycleAdapter extends LifecycleAdapter - { - @Override - public void start() throws Throwable - { - DiagnosticsManager diagnosticsManager = resolveDependency( DiagnosticsManager.class ); - Log diagnosticsLog = diagnosticsManager.getTargetLog(); - diagnosticsLog.info( "--- SERVER STARTED START ---" ); - connectorPortRegister = dependencyResolver.resolveDependency( ConnectorPortRegister.class ); - databaseActions = createDatabaseActions(); - - transactionFacade = createTransactionalActions(); - - cypherExecutor = new CypherExecutor( database, userLogProvider ); - - configureWebServer(); - - cypherExecutor.start(); - - startModules(); - - startWebServer(); - - diagnosticsLog.info( "--- SERVER STARTED END ---" ); - } - - @Override - public void stop() throws Exception - { - stopWebServer(); - stopModules(); - clearModules(); - } - } -} diff --git a/src/externel-properties/java/org/neo4j/kernel/impl/api/KernelImpl.java b/src/externel-properties/java/org/neo4j/kernel/impl/api/KernelImpl.java deleted file mode 100644 index 16c5c11a..00000000 --- a/src/externel-properties/java/org/neo4j/kernel/impl/api/KernelImpl.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.kernel.impl.api; - -import org.neo4j.internal.kernel.api.Transaction; -import org.neo4j.internal.kernel.api.exceptions.ProcedureException; -import org.neo4j.internal.kernel.api.exceptions.TransactionFailureException; -import org.neo4j.internal.kernel.api.security.LoginContext; -import org.neo4j.kernel.api.InwardKernel; -import org.neo4j.kernel.api.KernelTransaction; -import org.neo4j.kernel.api.TransactionHook; -import org.neo4j.kernel.api.proc.CallableProcedure; -import org.neo4j.kernel.api.proc.CallableUserAggregationFunction; -import org.neo4j.kernel.api.proc.CallableUserFunction; -import org.neo4j.kernel.configuration.Config; -import org.neo4j.kernel.impl.CustomPropertyNodeStoreHook; -import org.neo4j.kernel.impl.Settings; -import org.neo4j.kernel.impl.proc.Procedures; -import org.neo4j.kernel.impl.transaction.TransactionMonitor; -import org.neo4j.kernel.internal.DatabaseHealth; -import org.neo4j.kernel.lifecycle.LifecycleAdapter; - -import static org.neo4j.graphdb.factory.GraphDatabaseSettings.transaction_timeout; - -/** - * This is the Neo4j Kernel, an implementation of the Kernel API which is an internal component used by Cypher and the - * Core API (the API under org.neo4j.graphdb). - * - *
<h1>Structure</h1>
- * - * The Kernel lets you start transactions. The transactions allow you to create "statements", which, in turn, operate - * against the database. Statements and transactions are separate concepts due to isolation requirements. A single - * cypher query will normally use one statement, and there can be multiple statements executed in one transaction. - * - * Please refer to the {@link KernelTransaction} javadoc for details. - * - */ -public class KernelImpl extends LifecycleAdapter implements InwardKernel -{ - private final KernelTransactions transactions; - private final TransactionHooks hooks; - private final DatabaseHealth health; - private final TransactionMonitor transactionMonitor; - private final Procedures procedures; - private final Config config; - private volatile boolean isRunning; - - public KernelImpl( KernelTransactions transactionFactory, TransactionHooks hooks, DatabaseHealth health, TransactionMonitor transactionMonitor, - Procedures procedures, Config config ) - { - this.transactions = transactionFactory; - this.hooks = hooks; - - //NOTE: register hook - if(Settings._hookEnabled()) - hooks.register(new CustomPropertyNodeStoreHook()); - - this.health = health; - this.transactionMonitor = transactionMonitor; - this.procedures = procedures; - this.config = config; - } - - @Override - public KernelTransaction beginTransaction( Transaction.Type type, LoginContext loginContext ) throws TransactionFailureException - { - if ( !isRunning ) - { - throw new IllegalStateException( "Kernel is not running, so it is not possible to use it" ); - } - return beginTransaction( type, loginContext, config.get( transaction_timeout ).toMillis() ); - } - - @Override - public KernelTransaction beginTransaction( Transaction.Type type, LoginContext loginContext, long timeout ) throws - TransactionFailureException - { - health.assertHealthy( TransactionFailureException.class ); - KernelTransaction transaction = transactions.newInstance( type, loginContext, timeout ); - transactionMonitor.transactionStarted(); - return transaction; - } - - @Override - public void registerTransactionHook( TransactionHook hook ) - { - hooks.register( hook ); - } - - @Override - public void registerProcedure( CallableProcedure procedure ) throws ProcedureException - { - procedures.register( procedure ); - } - - @Override - public void registerUserFunction( CallableUserFunction function ) throws ProcedureException - { - procedures.register( function ); - } - - @Override - public void registerUserAggregationFunction( CallableUserAggregationFunction function ) throws ProcedureException - { - procedures.register( function ); - } - - @Override - public void start() - { - isRunning = true; - } - - @Override - public void stop() - { - if ( !isRunning ) - { - throw new IllegalStateException( "kernel is not running, so it is not possible to stop it" ); - } - isRunning = false; - } -} diff --git a/src/externel-properties/resources/gNode.properties b/src/externel-properties/resources/gNode.properties deleted file mode 100644 index 87afa0c8..00000000 --- a/src/externel-properties/resources/gNode.properties +++ /dev/null @@ -1,4 +0,0 @@ -zkServer=10.0.86.26:2181 -gNodeServiceAddress=159.226.193.204:7688 -sessionTimeout=20000 -registryPath=/gnodes \ No newline at end of file diff --git a/src/externel-properties/scala/cn/graiph/cnode/GNodeList.scala b/src/externel-properties/scala/cn/graiph/cnode/GNodeList.scala deleted file mode 100644 index 4e48da1b..00000000 --- a/src/externel-properties/scala/cn/graiph/cnode/GNodeList.scala +++ 
/dev/null @@ -1,45 +0,0 @@ -package cn.graiph.cnode - -import java.io.FileInputStream -import java.util.Properties - - -/** - * Created by bluejoe on 2019/11/4. - */ -case class NodeAddress(host: String, port: Int) { - -} - -object NodeAddress { - def fromString(url: String, separtor: String = ":" ): NodeAddress = { - val pair = url.split(separtor) - NodeAddress(pair(0), pair(1).toInt) - } -} - - -object ZKConstants { - val path = Thread.currentThread().getContextClassLoader.getResource("gNode.properties").getPath; - val prop = new Properties() - prop.load(new FileInputStream(path)) - val localServiceAddress = prop.getProperty("localhostServiceAdress") - val zkServerAddress = prop.getProperty("zkServerAddress") - val sessionTimeout = prop.getProperty("sessionTimeout").toInt - val connectionTimeout = prop.getProperty("connectionTimeout") - val registryPath = prop.getProperty("registryPath") -} - -trait GNodeList { - def getReadNodes(): Array[NodeAddress]; - - def getWriteNodes(): Array[NodeAddress]; -} - -trait GNodeSelector { - def chooseReadNode(): NodeAddress; - - def chooseWriteNode(): NodeAddress; -} - - diff --git a/src/externel-properties/scala/cn/graiph/cnode/ZKServiceDiscovery.scala b/src/externel-properties/scala/cn/graiph/cnode/ZKServiceDiscovery.scala deleted file mode 100644 index 861cb1f1..00000000 --- a/src/externel-properties/scala/cn/graiph/cnode/ZKServiceDiscovery.scala +++ /dev/null @@ -1,79 +0,0 @@ -package cn.graiph.cnode - -import scala.collection.JavaConversions._ -import org.apache.zookeeper.Watcher.Event.EventType -import org.apache.zookeeper.{WatchedEvent, Watcher, ZooKeeper} -import scala.collection.mutable.ArrayBuffer - -class ZKGNodeList extends GNodeList { - - val zkServerAddress = ZKConstants.zkServerAddress - val zkClient = new ZooKeeper(zkServerAddress, ZKConstants.sessionTimeout, new Watcher(){ - override def process(watchedEvent: WatchedEvent): Unit = { - if(watchedEvent.getType == EventType.None) - return - try { - updataServers(); - } catch { - case ex: Exception =>{ - ex.printStackTrace() - } - } - } - }); - - val readNodePath = ZKConstants.registryPath + "/" + "read" - val writeNodePath = ZKConstants.registryPath + "/" + "write" - - //TO DO: How to implement? 
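// [Editor's sketch] One way the "How to implement?" TODO below could be completed: re-read the
// children under both registry paths and keep the watch armed so the next membership change
// fires the watcher again. NodeAddress comes from this package; the object and helper names here
// are illustrative only, not part of PandaDB.
import org.apache.zookeeper.ZooKeeper
import scala.collection.JavaConversions._

object UpdateServersSketch {
  def refresh(zk: ZooKeeper, readPath: String, writePath: String): (Array[NodeAddress], Array[NodeAddress]) = {
    // passing watch = true re-registers the client's default watcher on both paths
    val readNodes = zk.getChildren(readPath, true).map(NodeAddress.fromString(_)).toArray
    val writeNodes = zk.getChildren(writePath, true).map(NodeAddress.fromString(_)).toArray
    (readNodes, writeNodes)
  }
}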
- def updataServers() { - val childrenList = zkClient.getChildren(ZKConstants.registryPath,true) - } - - - override def getReadNodes(): Array[NodeAddress] = { - val children = zkClient.getChildren(readNodePath,true) - val nodeList = ArrayBuffer[NodeAddress]() - for(child <- children){ - nodeList.append(NodeAddress.fromString(child)) - } - val gNodelist = nodeList.toArray - //at least 1 read node - if(gNodelist.length < 1){ - throw new Exception(s"Readable node is less than 1.") - } - gNodelist - } - - override def getWriteNodes(): Array[NodeAddress] = { - val children = zkClient.getChildren(writeNodePath,true) - val nodeList = ArrayBuffer[NodeAddress]() - - for(child <- children){ - nodeList.append(NodeAddress.fromString(child)) - } - val gNodelist = nodeList.toArray - //at least 2 write nodes - if(gNodelist.length < 2){ - throw new Exception(s"Writable nodes are less than 2.") - } - gNodelist - } - -} - -//TO DO: implement fully functioned selector -class ZKGNodeSelector extends GNodeSelector{ - val zkGNodeList = new ZKGNodeList; - - override def chooseReadNode(): NodeAddress = { - val readNodeList = zkGNodeList.getReadNodes() - readNodeList(0) - } - - override def chooseWriteNode(): NodeAddress = { - val writeNodeList = zkGNodeList.getWriteNodes() - writeNodeList(1) - } - -} \ No newline at end of file diff --git a/src/externel-properties/scala/cn/graiph/cnode/ZKServiceRegistry.scala b/src/externel-properties/scala/cn/graiph/cnode/ZKServiceRegistry.scala deleted file mode 100644 index bf7bab99..00000000 --- a/src/externel-properties/scala/cn/graiph/cnode/ZKServiceRegistry.scala +++ /dev/null @@ -1,60 +0,0 @@ -package cn.graiph.cnode - -import org.apache.zookeeper.{CreateMode, WatchedEvent, Watcher, ZooKeeper} -import org.apache.zookeeper.ZooDefs.Ids - -trait ServiceRegistry { - def registry(serviceName: String, serviceAddress: String); -} - -class ZKServiceRegistry extends ServiceRegistry{ - - val localhostServiceAddress = ZKConstants.localServiceAddress - val zkServerAddress = ZKConstants.zkServerAddress - - //error if the watcher is set null - val zkClient = new ZooKeeper(zkServerAddress,ZKConstants.sessionTimeout,new Watcher { - override def process(watchedEvent: WatchedEvent): Unit = { - } - }) - - override def registry(serviceName: String, serviceAddress: String): Unit = { - val registryPath = ZKConstants.registryPath - - /* node mode in zk: - * gnode - * / \ - * read write - * / \ - * address1 address2 - * - */ - // Create registry node (persistent) - if(zkClient.exists(registryPath, false) == null){ - zkClient.create(ZKConstants.registryPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) - } - - // Create service node (persistent) - val servicePath = registryPath + s"/" + serviceName - if(zkClient.exists(servicePath,false) == null){ - zkClient.create(servicePath,null,Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) - } - - // Create address node (temp) - val serviceAddress = servicePath +"/" + localhostServiceAddress - if(zkClient.exists(serviceAddress,false) == null){ - zkClient.create(serviceAddress, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL) - } - - } - - def registerAsReadNode(serviceAddress: String): Unit ={ - registry(s"read", serviceAddress) - } - - def registerAsWriteNode(serviceAddress: String): Unit ={ - registry(s"write", serviceAddress) - } - -} - diff --git a/src/externel-properties/scala/cn/graiph/util/ContextMap.scala b/src/externel-properties/scala/cn/graiph/util/ContextMap.scala deleted file mode 100644 index 49119622..00000000 --- 
a/src/externel-properties/scala/cn/graiph/util/ContextMap.scala +++ /dev/null @@ -1,47 +0,0 @@ -package cn.graiph.util - -import scala.collection.mutable.{Map => MMap} -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -object GlobalContext extends ContextMap { - -} - -class ContextMap { - private val _map = MMap[String, Any](); - - def put[T](key: String, value: T): T = { - _map(key) = value - value - }; - - def put[T](value: T)(implicit manifest: Manifest[T]): T = put[T](manifest.runtimeClass.getName, value) - - def get[T](key: String): T = { - _map(key).asInstanceOf[T] - }; - - def getOption[T](key: String): Option[T] = _map.get(key).map(_.asInstanceOf[T]); - - def get[T]()(implicit manifest: Manifest[T]): T = get(manifest.runtimeClass.getName); - - def getOption[T]()(implicit manifest: Manifest[T]): Option[T] = getOption(manifest.runtimeClass.getName); -} diff --git a/src/externel-properties/scala/org/neo4j/bolt/v1/runtime/DispatchedStatementProcessor.scala b/src/externel-properties/scala/org/neo4j/bolt/v1/runtime/DispatchedStatementProcessor.scala deleted file mode 100644 index 1cdcb333..00000000 --- a/src/externel-properties/scala/org/neo4j/bolt/v1/runtime/DispatchedStatementProcessor.scala +++ /dev/null @@ -1,76 +0,0 @@ -package org.neo4j.bolt.v1.runtime - -import java.lang.Iterable -import java.time.{Clock, Duration} -import java.util - -import cn.graiph.cnode.GNodeSelector -import org.neo4j.bolt.runtime.{BoltResult, StatementMetadata, StatementProcessor} -import org.neo4j.bolt.v1.runtime.bookmarking.Bookmark -import org.neo4j.cypher.result.QueryResult -import org.neo4j.cypher.result.QueryResult.QueryResultVisitor -import org.neo4j.driver._ -import org.neo4j.function.ThrowingConsumer -import org.neo4j.graphdb.{ExecutionPlanDescription, Notification, QueryExecutionType, QueryStatistics} -import org.neo4j.values.virtual.MapValue - -/** - * Created by bluejoe on 2019/11/4. - */ -class DispatchedStatementProcessor(source: StatementProcessor, selector: GNodeSelector) extends StatementProcessor { - var _currentStatementResult: StatementResult = _; - - override def markCurrentTransactionForTermination(): Unit = source.markCurrentTransactionForTermination() - - override def commitTransaction(): Bookmark = source.commitTransaction() - - override def run(statement: String, params: MapValue): StatementMetadata = source.run(statement, params) - - override def run(statement: String, params: MapValue, bookmark: Bookmark, txTimeout: Duration, txMetaData: util.Map[String, AnyRef]): StatementMetadata = { - //pickup a runnable node - val node = selector.chooseReadNode(); - val url = ""; - val driver = GraphDatabase.driver(url, AuthTokens.basic("", "")); - val s = driver.session(); - _currentStatementResult = s.run(statement, params.asInstanceOf[Value]); - //extract metadata from _currentStatementResult. 
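// [Editor's sketch] What the "extract metadata" note above could amount to: surface the remote
// result's column names through Bolt's StatementMetadata instead of returning null below. This is
// a hedged illustration -- it assumes StatementResult.keys() on the bundled driver and the
// fieldNames() contract of StatementMetadata; the helper name metadataOf is not part of PandaDB.
def metadataOf(result: StatementResult): StatementMetadata = new StatementMetadata {
  override def fieldNames(): Array[String] = result.keys().toArray(new Array[String](0))
}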
- null - } - - override def streamResult(resultConsumer: ThrowingConsumer[BoltResult, Exception]): Bookmark = { - resultConsumer.accept(new CypherAdapterStream(new QueryResultAdapter(_currentStatementResult), Clock.systemUTC())); - //return bookmark - Bookmark.fromParamsOrNull(null); - } - - override def hasOpenStatement: Boolean = source.hasOpenStatement - - override def rollbackTransaction(): Unit = source.rollbackTransaction() - - override def hasTransaction: Boolean = source.hasTransaction - - override def reset(): Unit = source.reset() - - override def validateTransaction(): Unit = source.validateTransaction() - - override def beginTransaction(bookmark: Bookmark): Unit = source.beginTransaction(bookmark) - - override def beginTransaction(bookmark: Bookmark, txTimeout: Duration, txMetadata: util.Map[String, AnyRef]): Unit = source.beginTransaction(bookmark, txTimeout, txMetadata) -} - - -class QueryResultAdapter(result: StatementResult) extends QueryResult { - override def getNotifications: Iterable[Notification] = ??? - - override def executionType(): QueryExecutionType = ??? - - override def queryStatistics(): QueryStatistics = ??? - - override def fieldNames(): Array[String] = ??? - - override def accept[E <: Exception](queryResultVisitor: QueryResultVisitor[E]): Unit = ??? - - override def executionPlanDescription(): ExecutionPlanDescription = ??? - - override def close(): Unit = ??? -} \ No newline at end of file diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/AllNodesScanPipe.scala b/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/AllNodesScanPipe.scala deleted file mode 100644 index a82381c8..00000000 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/AllNodesScanPipe.scala +++ /dev/null @@ -1,63 +0,0 @@ -package org.neo4j.cypher.internal.runtime.interpreted.pipes - -import org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.{Expression, ParameterExpression, Property} -import org.neo4j.cypher.internal.runtime.interpreted.commands.predicates.{GreaterThan, GreaterThanOrEqual} -import org.neo4j.cypher.internal.runtime.interpreted.commands.predicates.{LessThan, LessThanOrEqual, Equals} -import org.neo4j.cypher.internal.runtime.interpreted._ -import org.neo4j.cypher.internal.v3_5.util.attribution.Id -import org.neo4j.kernel.impl.CustomPropertyNodeStoreHolder -import org.neo4j.values.virtual.NodeValue - -case class AllNodesScanPipe(ident: String)(val id: Id = Id.INVALID_ID) extends Pipe { - - var _optPredicate: Option[Expression] = None; - - def predicatePushDown(predicate: Expression): Unit = { - _optPredicate = Some(predicate); - } - - protected def internalCreateResults(state: QueryState): Iterator[ExecutionContext] = { - val baseContext = state.newExecutionContext(executionContextFactory) - val nodes: Iterator[NodeValue] = _optPredicate match { - case Some(predicate) => - predicate match { - case GreaterThan(a: Property, b: ParameterExpression) => { - val value = b.apply(baseContext, state) - CustomPropertyNodeStoreHolder.get.filterNodes(NFGreaterThan(a.propertyKey.name, value)). - map(_.toNeo4jNodeValue()).iterator - } - - case GreaterThanOrEqual(a: Property, b: ParameterExpression) => { - val value = b.apply(baseContext, state) - CustomPropertyNodeStoreHolder.get.filterNodes(NFGreaterThanOrEqual(a.propertyKey.name, value)). 
- map(_.toNeo4jNodeValue()).iterator - } - - case LessThan(a: Property, b: ParameterExpression) => { - val value = b.apply(baseContext, state) - CustomPropertyNodeStoreHolder.get.filterNodes(NFLessThan(a.propertyKey.name, value)). - map(_.toNeo4jNodeValue()).iterator - } - - case LessThanOrEqual(a: Property, b: ParameterExpression) => { - val value = b.apply(baseContext, state) - CustomPropertyNodeStoreHolder.get.filterNodes(NFLessThanOrEqual(a.propertyKey.name, value)). - map(_.toNeo4jNodeValue()).iterator - } - - case Equals(a: Property, b: ParameterExpression) => { - val value = b.apply(baseContext, state) - CustomPropertyNodeStoreHolder.get.filterNodes(NFEquals(a.propertyKey.name, value)). - map(_.toNeo4jNodeValue()).iterator - } - - case _ => state.query.nodeOps.all - - } - - case _ => state.query.nodeOps.all - } - - nodes.map(n => executionContextFactory.copyWith(baseContext, ident, n)) - } -} diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/DeletePipe.scala b/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/DeletePipe.scala deleted file mode 100644 index e59e391c..00000000 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/DeletePipe.scala +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.cypher.internal.runtime.interpreted.pipes - -import org.neo4j.cypher.internal.v3_5.util.CypherTypeException -import org.neo4j.cypher.internal.runtime.interpreted.ExecutionContext -import org.neo4j.cypher.internal.runtime.interpreted.commands.expressions.Expression -import org.neo4j.cypher.internal.runtime.interpreted.GraphElementPropertyFunctions -import org.neo4j.cypher.internal.v3_5.util.attribution.Id -import org.neo4j.values.storable.Values -import org.neo4j.values.virtual.{RelationshipValue, NodeValue, PathValue} -import org.neo4j.kernel.impl.CustomPropertyNodeStoreHolder - -import scala.collection.JavaConverters._ - - -case class DeletePipe(src: Pipe, expression: Expression, forced: Boolean) - (val id: Id = Id.INVALID_ID) - extends PipeWithSource(src) with GraphElementPropertyFunctions { - - expression.registerOwningPipe(this) - - - override protected def internalCreateResults(input: Iterator[ExecutionContext], - state: QueryState): Iterator[ExecutionContext] = { - input.map { row => - expression(row, state) match { - case Values.NO_VALUE => // do nothing - case r: RelationshipValue => - deleteRelationship(r, state) - case n: NodeValue => - deleteNode(n, state) - case p: PathValue => - deletePath(p, state) - case other => - throw new CypherTypeException(s"Expected a Node, Relationship or Path, but got a ${other.getClass.getSimpleName}") - } - row - } - } - - private def deleteNode(n: NodeValue, state: QueryState) = if (!state.query.nodeOps.isDeletedInThisTx(n.id())) { - if (forced) CustomPropertyNodeStoreHolder.get.deleteNodes(List(n.id())) - else CustomPropertyNodeStoreHolder.get.deleteNodes(List(n.id())) - } - - private def deleteRelationship(r: RelationshipValue, state: QueryState) = - if (!state.query.relationshipOps.isDeletedInThisTx(r.id())) state.query.relationshipOps.delete(r.id()) - - private def deletePath(p: PathValue, state: QueryState) = p.asList().iterator().asScala.foreach { - case n: NodeValue => - deleteNode(n, state) - case r: RelationshipValue => - deleteRelationship(r, state) - case other => - throw new CypherTypeException(s"Expected a Node or Relationship, but got a ${other.getClass.getSimpleName}") - } -} diff --git a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/RemoveLabelsPipe.scala b/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/RemoveLabelsPipe.scala deleted file mode 100644 index b3b63380..00000000 --- a/src/externel-properties/scala/org/neo4j/cypher/internal/runtime/interpreted/pipes/RemoveLabelsPipe.scala +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -package org.neo4j.cypher.internal.runtime.interpreted.pipes - -import org.neo4j.cypher.internal.runtime.interpreted.ExecutionContext -import org.neo4j.cypher.internal.runtime.interpreted.CastSupport -import org.neo4j.cypher.internal.runtime.interpreted.GraphElementPropertyFunctions -import org.neo4j.cypher.internal.v3_5.util.attribution.Id -import org.neo4j.values.storable.Values -import org.neo4j.values.virtual.VirtualNodeValue -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder,CustomPropertyNodeModification} - -case class RemoveLabelsPipe(src: Pipe, variable: String, labels: Seq[LazyLabel]) - (val id: Id = Id.INVALID_ID) - extends PipeWithSource(src) with GraphElementPropertyFunctions { - - override protected def internalCreateResults(input: Iterator[ExecutionContext], - state: QueryState): Iterator[ExecutionContext] = { - input.map { row => - val item = row.get(variable).get - if (item != Values.NO_VALUE) removeLabels(row, state, CastSupport.castOrFail[VirtualNodeValue](item).id) - row - } - } - - private def removeLabels(context: ExecutionContext, state: QueryState, nodeId: Long) = { - //val labelIds = labels.flatMap(_.getOptId(state.query)).map(_.id) - // state.query.removeLabelsFromNode(nodeId, labelIds.iterator) - val labelNames = labels.map(x=>x.name) - CustomPropertyNodeStoreHolder.get.updateNodes(Some( - new CustomPropertyNodeModification(nodeId,null,null,null,null,labelNames) - )) - - } -} diff --git a/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStore.scala b/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStore.scala deleted file mode 100644 index 87ca79de..00000000 --- a/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStore.scala +++ /dev/null @@ -1,42 +0,0 @@ -package org.neo4j.kernel.impl - -import org.neo4j.cypher.internal.runtime.interpreted.NFPredicate -import org.neo4j.values.storable.{Value, Values} -import org.neo4j.values.virtual.{NodeValue, VirtualValues} - -/** - * Created by bluejoe on 2019/10/7. - */ -trait CustomPropertyNodeStore { - def deleteNodes(docsToBeDeleted: Iterable[Long]); - - def addNodes(docsToAdded: Iterable[CustomPropertyNode]); - - def updateNodes(docsToUpdated: Iterable[CustomPropertyNodeModification]); - - def init(); - - def filterNodes(expr: NFPredicate): Iterable[CustomPropertyNode]; - - def getNodesByLabel(label: String): Iterable[CustomPropertyNode]; -} - -case class CustomPropertyNodeModification( - id: Long, - fieldsAdded: Map[String, Value], - fieldsRemoved: Iterable[String], - fieldsUpdated: Map[String, Value], - labelsAdded: Iterable[String], - labelsRemoved: Iterable[String]) { - -} - -case class CustomPropertyNode(id: Long, fields: Map[String, Value], labels: Iterable[String]) { - def field(name: String): Option[Value] = fields.get(name) - - def toNeo4jNodeValue(): NodeValue = { - VirtualValues.nodeValue(id, - Values.stringArray(labels.toArray: _*), - VirtualValues.map(fields.keys.toArray, fields.values.toArray)) - } -} diff --git a/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStoreHolder.scala b/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStoreHolder.scala deleted file mode 100644 index a231b861..00000000 --- a/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStoreHolder.scala +++ /dev/null @@ -1,57 +0,0 @@ -package org.neo4j.kernel.impl - -import cn.graiph.util.Logging -import org.neo4j.cypher.internal.runtime.interpreted.NFPredicate - -/** - * Created by bluejoe on 2019/10/7. 
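// [Editor's note] A small usage sketch of the CustomPropertyNodeStore contract defined above,
// exercised through the in-memory implementation from this module; the ids, property values and
// the object name are illustrative only.
import org.neo4j.cypher.internal.runtime.interpreted.NFGreaterThan
import org.neo4j.values.storable.Values

object CustomStoreUsageSketch {
  def demo(): Iterable[CustomPropertyNode] = {
    val store: CustomPropertyNodeStore = new InMemoryPropertyNodeStore()
    store.init()
    // index one node, then answer a pushed-down "age > 18" predicate from the external store
    store.addNodes(Seq(CustomPropertyNode(1L, Map("age" -> Values.intValue(42)), Seq("Person"))))
    store.filterNodes(NFGreaterThan("age", Values.intValue(18)))
  }
}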
- */ -object CustomPropertyNodeStoreHolder { - var _propertyNodeStore: Option[CustomPropertyNodeStore] = None; - - def hold(propertyNodeStore: CustomPropertyNodeStore): Unit = { - _propertyNodeStore = Some(propertyNodeStore); - propertyNodeStore.init(); - } - - def get: CustomPropertyNodeStore = _propertyNodeStore.get; -} - -object Settings { - var _hookEnabled = false; - var _patternMatchFirst = true; -} - -class LoggingPropertiesStore(source: CustomPropertyNodeStore) extends CustomPropertyNodeStore with Logging { - override def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { - logger.debug(s"deleteNodes: $docsToBeDeleted") - source.deleteNodes(docsToBeDeleted) - } - - override def init(): Unit = { - logger.debug(s"init()") - source.init() - } - - override def addNodes(docsToAdded: Iterable[CustomPropertyNode]): Unit = { - logger.debug(s"addNodes:$docsToAdded") - source.addNodes(docsToAdded) - } - - override def filterNodes(expr: NFPredicate): Iterable[CustomPropertyNode] = { - val ir = source.filterNodes(expr) - logger.debug(s"filterNodes(expr=$expr): $ir") - ir; - } - - override def updateNodes(docsToUpdated: Iterable[CustomPropertyNodeModification]): Unit = { - logger.debug(s"docsToUpdated: $docsToUpdated") - source.updateNodes(docsToUpdated) - } - - override def getNodesByLabel(label: String): Iterable[CustomPropertyNode] = { - val res = source.getNodesByLabel(label) - logger.debug(s"getNodesByLabel: result=> $res") - res - } -} diff --git a/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStoreHook.scala b/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStoreHook.scala deleted file mode 100644 index f3c8bd64..00000000 --- a/src/externel-properties/scala/org/neo4j/kernel/impl/CustomPropertyNodeStoreHook.scala +++ /dev/null @@ -1,100 +0,0 @@ -package org.neo4j.kernel.impl - -import org.eclipse.collections.api.block.function.primitive.{IntToObjectFunction, LongToObjectFunction} -import org.neo4j.kernel.api.{KernelTransaction, TransactionHook} -import org.neo4j.storageengine.api.StorageReader -import org.neo4j.storageengine.api.txstate.ReadableTransactionState -import org.neo4j.values.storable.Value - -import scala.collection.JavaConversions._ - -/** - * Created by bluejoe on 2019/10/6. 
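// [Editor's note] A hedged sketch of how the pieces above fit together at startup: a concrete
// store is handed to the holder, optionally wrapped in the LoggingPropertiesStore decorator, and
// the hook flag is switched on so that KernelImpl registers the commit-time hook. The exact
// bootstrap location is not shown in this diff; the object name is illustrative.
object PropertyStoreWiringSketch {
  def install(): Unit = {
    Settings._hookEnabled = true // KernelImpl registers CustomPropertyNodeStoreHook only when this flag is set
    CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(new InSolrPropertyNodeStore()))
  }
}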
- */ -class CustomPropertyNodeStoreHook extends TransactionHook[TransactionHook.Outcome] { - override def afterRollback(state: ReadableTransactionState, transaction: KernelTransaction, outcome: TransactionHook.Outcome): Unit = { - //discard update - } - - override def afterCommit(state: ReadableTransactionState, transaction: KernelTransaction, outcome: TransactionHook.Outcome): Unit = { - } - - override def beforeCommit(state: ReadableTransactionState, transaction: KernelTransaction, storageReader: StorageReader): TransactionHook.Outcome = { - if (state.hasDataChanges) { - - //save solr documents - val aar = state.addedAndRemovedNodes(); - val tokens = transaction.tokenRead(); - - //save added nodes - val docsToBeAdded = aar.getAdded.collect(new LongToObjectFunction[CustomPropertyNode]() { - override def valueOf(l: Long): CustomPropertyNode = { - new CustomPropertyNode(l, state.getNodeState(l).addedProperties().map { prop => - val key = tokens.propertyKeyName(prop.propertyKeyId()) - val value = prop.value() - key -> value - }.toMap, state.getNodeState(l).labelDiffSets().getAdded.collect(new LongToObjectFunction[String] { - override def valueOf(l: Long): String = tokens.nodeLabelName(l.toInt) - })) - } - }).toList - - if (!docsToBeAdded.isEmpty) { - CustomPropertyNodeStoreHolder.get.addNodes(docsToBeAdded); - } - - //delete removed nodes - //maybe wrong - val docsToBeDeleted = aar.getRemoved.collect(new LongToObjectFunction[Long]() { - override def valueOf(l: Long): Long = l - }).toList - - if (!docsToBeDeleted.isEmpty) { - CustomPropertyNodeStoreHolder.get.deleteNodes(docsToBeDeleted) - } - - //TODO-1: update modified nodes - val mn = state.modifiedNodes(); - //_propertyNodeStore.updateNodes - if (mn.nonEmpty) { - val docsToBeUpdated = mn.map(ns => { - - val id = ns.getId - - val fieldsAdded: Map[String, Value] = ns.addedProperties().map(prop => { - val key = tokens.propertyKeyName(prop.propertyKeyId()) - val value = prop.value() - key -> value - }).toMap - - val fieldsRemoved: Iterable[String] = ns.removedProperties().collect(new IntToObjectFunction[String] { - override def valueOf(intParameter: Int): String = tokens.nodeLabelName(intParameter) - }) - - val fieldsUpdated: Map[String, Value] = ns.changedProperties().map(prop => { - val key = tokens.propertyKeyName(prop.propertyKeyId()) - val value = prop.value() - key -> value - }).toMap - - val labelsAdded: Iterable[String] = ns.labelDiffSets().getAdded.collect(new LongToObjectFunction[String] { - override def valueOf(longParameter: Long): String = tokens.nodeLabelName(longParameter.toInt) - }) - - val labelsRemoved: Iterable[String] = ns.labelDiffSets().getRemoved.collect(new LongToObjectFunction[String] { - override def valueOf(longParameter: Long): String = tokens.nodeLabelName(longParameter.toInt) - }) - CustomPropertyNodeModification(id, fieldsAdded, fieldsRemoved, fieldsUpdated, labelsAdded, labelsRemoved) - }) - - CustomPropertyNodeStoreHolder.get.updateNodes(docsToBeUpdated) - } - } - - new TransactionHook.Outcome { - override def failure(): Throwable = null - - override def isSuccessful: Boolean = true - } - } -} diff --git a/src/externel-properties/scala/org/neo4j/kernel/impl/InMemoryPropertyNodeStore.scala b/src/externel-properties/scala/org/neo4j/kernel/impl/InMemoryPropertyNodeStore.scala deleted file mode 100644 index 1fd08286..00000000 --- a/src/externel-properties/scala/org/neo4j/kernel/impl/InMemoryPropertyNodeStore.scala +++ /dev/null @@ -1,69 +0,0 @@ -package org.neo4j.kernel.impl - -import 
org.neo4j.cypher.internal.runtime.interpreted._ -import org.neo4j.values.AnyValue -import org.neo4j.values.storable.NumberValue - -import scala.collection.mutable - -/** - * Created by bluejoe on 2019/10/7. - */ -class InMemoryPropertyNodeStore extends CustomPropertyNodeStore { - val nodes = mutable.Map[Long, CustomPropertyNode](); - - def filterNodes(expr: NFPredicate): Iterable[CustomPropertyNode] = { - expr match { - case NFGreaterThan(fieldName: String, value: AnyValue) => { - nodes.values.filter(x => x.field(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() > - value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) - } - - case NFLessThan(fieldName: String, value: AnyValue) => { - nodes.values.filter(x => x.field(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() < - value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) - } - - case NFLessThanOrEqual(fieldName: String, value: AnyValue) => { - nodes.values.filter(x => x.field(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() <= - value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) - } - - case NFGreaterThanOrEqual(fieldName: String, value: AnyValue) => { - nodes.values.filter(x => x.field(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() >= - value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) - } - - case NFEquals(fieldName: String, value: AnyValue) => { - nodes.values.filter(x => x.field(fieldName).map(_.asInstanceOf[NumberValue].doubleValue() == - value.asInstanceOf[NumberValue].doubleValue()).getOrElse(false)) - } - } - } - - - override def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { - nodes --= docsToBeDeleted - } - - override def addNodes(docsToAdded: Iterable[CustomPropertyNode]): Unit = { - nodes ++= docsToAdded.map(x => x.id -> x) - } - - override def init(): Unit = { - } - - override def updateNodes(docsToUpdated: Iterable[CustomPropertyNodeModification]): Unit = { - - } - - override def getNodesByLabel(label: String): Iterable[CustomPropertyNode] = { - val res = mutable.ArrayBuffer[CustomPropertyNode]() - nodes.map(n=>{ - if(n._2.labels.toArray.contains(label) ) - res.append(n._2) - }) - res - } - -} diff --git a/src/externel-properties/scala/org/neo4j/kernel/impl/InSolrPropertyNodeStore.scala b/src/externel-properties/scala/org/neo4j/kernel/impl/InSolrPropertyNodeStore.scala deleted file mode 100644 index d46e2325..00000000 --- a/src/externel-properties/scala/org/neo4j/kernel/impl/InSolrPropertyNodeStore.scala +++ /dev/null @@ -1,190 +0,0 @@ -package org.neo4j.kernel.impl - -import org.apache.solr.client.solrj.impl.CloudSolrClient -import org.apache.solr.common.{SolrDocument, SolrInputDocument} -import org.neo4j.cypher.internal.runtime.interpreted.{NFLessThan, NFPredicate, _} - -import org.apache.solr.client.solrj.SolrQuery - -import org.neo4j.values.storable.{Value, Values} - -import scala.collection.JavaConversions._ - -import scala.collection.mutable.ArrayBuffer - -/** - * Created by bluejoe on 2019/10/7. 
- */ -class InSolrPropertyNodeStore extends CustomPropertyNodeStore { - var _solrClient: Option[CloudSolrClient] = None; - override def deleteNodes(docsToBeDeleted: Iterable[Long]): Unit = { - _solrClient.get.deleteById(docsToBeDeleted.map(_.toString).toList); - _solrClient.get.commit(); - } - - override def addNodes(docsToAdded: Iterable[CustomPropertyNode]): Unit = { - _solrClient.get.add(docsToAdded.map { x => - val doc = new SolrInputDocument(); - x.fields.foreach(y => doc.addField(y._1, y._2.asObject)); - doc.addField("id",x.id); - doc.addField("labels",x.labels.mkString(",")); - doc - }) - _solrClient.get.commit(); - } - - override def init(): Unit = { - val zkUrl = "10.0.86.179:2181,10.0.87.45:2181,10.0.87.46:2181" - val collectionName = "graiphdb" - val _client = new CloudSolrClient(zkUrl); - _client.setZkClientTimeout(30000); - _client.setZkConnectTimeout(50000); - _client.setDefaultCollection(collectionName); - _solrClient = Some(_client); - } - - def predicate2SolrQuery(expr: NFPredicate):String ={ - var q: Option[String] = None - expr match { - case expr: NFGreaterThan => { - val paramValue = expr.value.asInstanceOf[Value].asObject() - val paramKey = expr.propName - q = Some(s"$paramKey:{ $paramValue TO * }") - } - case expr: NFGreaterThanOrEqual => { - val paramValue = expr.value.asInstanceOf[Value].asObject() - val paramKey = expr.propName - q = Some(s"$paramKey:[ $paramValue TO * ]") - } - case expr: NFLessThan => { - val paramValue = expr.value.asInstanceOf[Value].asObject() - val paramKey = expr.propName - q = Some(s"$paramKey:{ * TO $paramValue}") - } - case expr: NFLessThanOrEqual => { - val paramValue = expr.value.asInstanceOf[Value].asObject() - val paramKey = expr.propName - q = Some(s"$paramKey:[ * TO $paramValue]") - } - case expr: NFEquals => { - val paramValue = expr.value.asInstanceOf[Value].asObject() - val paramKey = expr.propName - q = Some(s"$paramKey:$paramValue") - } - case expr: NFNotEquals => { - val paramValue = expr.value.asInstanceOf[Value].asObject() - val paramKey = expr.propName - q = Some(s"-$paramKey:$paramValue") - } - case expr: NFNotNull => { - val paramKey = expr.propName - q = Some(s"$paramKey:*") - } - case expr: NFIsNull => { - val paramKey = expr.propName - q = Some(s"-$paramKey:*") - } - case expr: NFTrue => { - q = Some(s"*:*") - } - case expr: NFFalse => { - q = Some(s"-*:*") - } - case expr: NFStartsWith => { - val paramValue = expr.text - val paramKey = expr.propName - q = Some(s"$paramKey:$paramValue*") - } - case expr: NFEndsWith => { - val paramValue = expr.text - val paramKey = expr.propName - q = Some(s"$paramKey:*$paramValue") - } - case expr: NFHasProperty => { - val paramKey = expr.propName - q = Some(s"$paramKey:[* TO *]") - } - case expr: NFContainsWith => { - val paramValue = expr.text - val paramKey = expr.propName - q = Some(s"$paramKey:*$paramValue*") - } - case expr: NFRegexp => { - val paramValue = expr.text.replace(".","") - val paramKey = expr.propName - q = Some(s"$paramKey:$paramValue") - } - case _ => q=None - } - q.get - } - - override def filterNodes(expr: NFPredicate): Iterable[CustomPropertyNode] = { - val nodeArray = ArrayBuffer[CustomPropertyNode]() - var q: Option[String] = None; - expr match { - case expr:NFAnd => { - val q1 = predicate2SolrQuery(expr.a) - val q2 = predicate2SolrQuery(expr.b) - q = Some(s"$q1 and $q2") - } - case expr:NFOr => { - val q1 = predicate2SolrQuery(expr.a) - val q2 = predicate2SolrQuery(expr.b) - q = Some(s"$q1 or $q2") - } - case expr:NFNot => { - val q1 = 
predicate2SolrQuery(expr.a) - q = if (q1.indexOf("-")>=0) Some(s"${q1.substring(q1.indexOf("-") + 1)}") else Some(s"-$q1") - } - case _ => { - val q1 = predicate2SolrQuery(expr) - q = Some(s"$q1") - } - } - _solrClient.get.query(new SolrQuery().setQuery(q.get)).getResults().foreach( - x => { - val id = x.get("id") - val labels = x.get("labels").toString.split(",") - val tik = "id,labels,_version_" - val fieldsName = x.getFieldNames - val fields = for (y <- fieldsName if tik.indexOf(y) < 0) yield (y, Values.of(x.get(y).toString)) - nodeArray += CustomPropertyNode(id.toString.toLong, fields.toMap, labels) - } - ) - nodeArray - } - - def getCustomPropertyNodeByid(id:Long):SolrDocument={ - _solrClient.get.getById(id.toString) - } - - def modif2node(node: CustomPropertyNodeModification):CustomPropertyNode={ - val doc = getCustomPropertyNodeByid(node.id) - val labels = if (doc.get("labels") == null) ArrayBuffer[String]() else { - val labelsq = doc.get("labels").toString - val labelsTemp = labelsq.substring(labelsq.indexOf('[') + 1, labelsq.indexOf(']')) - labelsTemp.split(",").toBuffer - } - node.labelsAdded.foreach(label => if (!labels.contains(label)) labels +=label) - node.labelsRemoved.foreach(label => labels -=label) - val tik = "id,labels,_version_" - val fieldsName = doc.getFieldNames - val fields = for (y <- fieldsName if tik.indexOf(y) < 0) yield (y, Values.of(doc.get(y).toString)) - var fieldMap = fields.toMap - node.fieldsAdded.foreach(fd => fieldMap += fd) - node.fieldsRemoved.foreach(fd => fieldMap -=fd) - node.fieldsUpdated.foreach(fd => fieldMap += fd) - CustomPropertyNode(node.id, fieldMap, labels) - } - - override def updateNodes(docsToUpdated: Iterable[CustomPropertyNodeModification]): Unit = { - val docsToAdded = for(doc <- docsToUpdated) yield (modif2node(doc)) - addNodes(docsToAdded) - } - - override def getNodesByLabel(label: String): Iterable[CustomPropertyNode] = { - val propName = "labels" - filterNodes(NFContainsWith(propName,label)) - } -} diff --git a/src/graiph-database/resources/logo.txt b/src/graiph-database/resources/logo.txt deleted file mode 100644 index 90c00615..00000000 --- a/src/graiph-database/resources/logo.txt +++ /dev/null @@ -1,8 +0,0 @@ - _____ _____ _ _____ ____ - / ____| /\ |_ _| | | | __ \| _ \ - | | __ _ __ / \ | | _ __ | |__ | | | | |_) | - | | |_ | '__/ /\ \ | | | '_ \| '_ \| | | | _ < - | |__| | | / ____ \ _| |_| |_) | | | | |__| | |_) | - \_____|_|/_/ \_\_____| .__/|_| |_|_____/|____/ - | | - |_| \ No newline at end of file diff --git a/src/graiph-database/scala/cn/graiph/db/GraiphDB.scala b/src/graiph-database/scala/cn/graiph/db/GraiphDB.scala deleted file mode 100644 index 8a9151df..00000000 --- a/src/graiph-database/scala/cn/graiph/db/GraiphDB.scala +++ /dev/null @@ -1,85 +0,0 @@ -package cn.graiph.db - -import java.io.File - -import cn.graiph.driver.CypherService -import cn.graiph.util.Logging -import cn.graiph.{CustomPropertyProvider, CypherPluginRegistry, ValueMatcher} -import org.neo4j.graphdb.GraphDatabaseService -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.kernel.impl.blob.{BlobPropertyStoreServiceContext, BlobPropertyStoreServicePlugin, BlobPropertyStoreServicePlugins} -import org.springframework.context.support.FileSystemXmlApplicationContext - -/** - * Created by bluejoe on 2019/7/17. 
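// [Editor's note] Worked examples of the predicate-to-Solr translation implemented by
// predicate2SolrQuery / filterNodes above (values written as plain literals for readability):
//   NFGreaterThan("age", 18)                      ->  age:{ 18 TO * }
//   NFStartsWith("name", "pan")                   ->  name:pan*
//   NFAnd(NFEquals("country", "CN"),
//         NFHasProperty("email"))                 ->  country:CN and email:[* TO *]
//   NFNot(NFEquals("country", "CN"))              ->  -country:CN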
- */ -object GraiphDB extends Logging with Touchable { - SemanticOperatorPluginInjection.touch; - - def openDatabase(dbDir: File, propertiesFile: File): GraphDatabaseService = { - val builder = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(dbDir); - logger.info(s"loading configuration from $propertiesFile"); - builder.loadPropertiesFromFile(propertiesFile.getPath); - //bolt server is not required - builder.setConfig("dbms.connector.bolt.enabled", "false"); - builder.newGraphDatabase(); - } - - def connect(dbs: GraphDatabaseService): CypherService = { - new LocalGraphService(dbs); - } -} - -trait Touchable { - //do nothing, just ensure this object is initialized - final def touch: Unit = { - } -} - -object SemanticOperatorPluginInjection extends Touchable { - BlobPropertyStoreServicePlugins.add(new SemanticOperatorPlugin()); - - class SemanticOperatorPlugin extends BlobPropertyStoreServicePlugin with Logging { - override def init(ctx: BlobPropertyStoreServiceContext): Unit = { - val configuration = ctx.configuration; - val cypherPluginRegistry = configuration.getRaw("blob.plugins.conf").map(x => { - val xml = new File(x); - - val path = - if (xml.isAbsolute) { - xml.getPath - } - else { - val configFilePath = configuration.getRaw("config.file.path") - if (configFilePath.isDefined) { - new File(new File(configFilePath.get).getParentFile, x).getAbsoluteFile.getCanonicalPath - } - else { - xml.getAbsoluteFile.getCanonicalPath - } - } - - logger.info(s"loading semantic plugins: $path"); - val appctx = new FileSystemXmlApplicationContext("file:" + path); - appctx.getBean[CypherPluginRegistry](classOf[CypherPluginRegistry]); - }).getOrElse { - logger.info(s"semantic plugins not loaded: blob.plugins.conf=null"); - new CypherPluginRegistry() - } - - val customPropertyProvider = cypherPluginRegistry.createCustomPropertyProvider(configuration); - val valueMatcher = cypherPluginRegistry.createValueComparatorRegistry(configuration); - - ctx.instanceContext.put[CustomPropertyProvider](customPropertyProvider); - ctx.instanceContext.put[ValueMatcher](valueMatcher); - } - - override def stop(ctx: BlobPropertyStoreServiceContext): Unit = { - - } - - override def start(ctx: BlobPropertyStoreServiceContext): Unit = { - - } - } -} \ No newline at end of file diff --git a/src/graiph-database/scala/cn/graiph/server/GraiphServer.scala b/src/graiph-database/scala/cn/graiph/server/GraiphServer.scala deleted file mode 100644 index 6da26460..00000000 --- a/src/graiph-database/scala/cn/graiph/server/GraiphServer.scala +++ /dev/null @@ -1,52 +0,0 @@ -package cn.graiph.server - -import java.io.File -import java.util.Optional - -import cn.graiph.db.{GraiphDB, Touchable} -import cn.graiph.util.Logging -import org.apache.commons.io.IOUtils -import org.neo4j.server.{AbstractNeoServer, CommunityBootstrapper} - -import scala.collection.JavaConversions - -/** - * Created by bluejoe on 2019/7/17. 
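// [Editor's note] A hedged usage sketch of the embedded entry points defined in GraiphDB above;
// the file paths and the object name are placeholders, not part of the codebase.
import java.io.File
import cn.graiph.db.GraiphDB

object GraiphDbUsageSketch {
  def main(args: Array[String]): Unit = {
    // open an embedded instance from a data directory plus a neo4j.conf-style properties file
    val db = GraiphDB.openDatabase(new File("./data/graph.db"), new File("./conf/neo4j.conf"))
    val cypher = GraiphDB.connect(db) // wraps the embedded service in a CypherService
    // ... issue queries through `cypher`, then release the embedded database
    db.shutdown()
  }
}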
- */ -object GraiphServer extends Logging with Touchable { - val logo = IOUtils.toString(this.getClass.getClassLoader.getResourceAsStream("logo.txt"), "utf-8"); - AbstractNeoServer.NEO4J_IS_STARTING_MESSAGE = "======== Graiph (based on Neo4j-3.5.6) ======== "+"\r\n"+logo; - GraiphDB.touch; - - def startServer(dbDir: File, configFile: File, configOverrides: Map[String, String] = Map()): GraiphServer = { - val server = new GraiphServer(dbDir, configFile, configOverrides); - server.start(); - server; - } -} - -class GraiphServer(dbDir: File, configFile: File, configOverrides: Map[String, String] = Map()) { - val server = new CommunityBootstrapper(); - - def start(): Int = { - server.start(dbDir, Optional.of(configFile), - JavaConversions.mapAsJavaMap(configOverrides)); - } - - def shutdown(): Int = { - server.stop(); - } -} - -object GraiphServerStarter { - def main(args: Array[String]) { - if (args.length != 2) { - sys.error(s"Usage:\r\n"); - sys.error(s"\tGraiphServerStarter \r\n"); - } - else { - GraiphServer.startServer(new File(args(0)), - new File(args(1))); - } - } -} \ No newline at end of file diff --git a/src/graiph-database/scala/cn/graiph/server/GraiphServerEntryPoint.scala b/src/graiph-database/scala/cn/graiph/server/GraiphServerEntryPoint.scala deleted file mode 100644 index 1507e9da..00000000 --- a/src/graiph-database/scala/cn/graiph/server/GraiphServerEntryPoint.scala +++ /dev/null @@ -1,18 +0,0 @@ -package cn.graiph.server - -import org.neo4j.server.CommunityEntryPoint - -/** - * Created by bluejoe on 2019/7/26. - */ -object GraiphServerEntryPoint { - - def main(args: Array[String]) = { - GraiphServer.touch; - CommunityEntryPoint.main(args) - }; - - def start(args: Array[String]) = CommunityEntryPoint.start(_); - - def stop(args: Array[String]) = CommunityEntryPoint.stop(_); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/AccessMode.java b/src/graiph-driver/java/org/neo4j/driver/AccessMode.java deleted file mode 100644 index d4db2f67..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/AccessMode.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -/** - * Used by Routing Driver to decide if a transaction should be routed to a write server or a read server in a cluster. - * When running a transaction, a write transaction requires a server that supports writes. - * A read transaction, on the other hand, requires a server that supports read operations. - * This classification is key for routing driver to route transactions to a cluster correctly. - * - * While any {@link AccessMode} will be ignored while running transactions via a driver towards a single server. - * As the single server serves both read and write operations at the same time. 
- */ -public enum AccessMode -{ - /** - * Use this for transactions that requires a read server in a cluster - */ - READ, - /** - * Use this for transactions that requires a write server in a cluster - */ - WRITE -} diff --git a/src/graiph-driver/java/org/neo4j/driver/AuthToken.java b/src/graiph-driver/java/org/neo4j/driver/AuthToken.java deleted file mode 100644 index fd3fd768..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/AuthToken.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -/** - * Token for holding authentication details, such as user name and password. - * Such a token is required by a {@link Driver} to authenticate with a Neo4j - * instance. - * - * @see AuthTokens - * @see GraphDatabase#driver(String, AuthToken) - * @since 1.0 - */ -public interface AuthToken -{ - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/AuthTokens.java b/src/graiph-driver/java/org/neo4j/driver/AuthTokens.java deleted file mode 100644 index 68e13285..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/AuthTokens.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.internal.security.InternalAuthToken; - -import static java.util.Collections.singletonMap; -import static org.neo4j.driver.internal.security.InternalAuthToken.CREDENTIALS_KEY; -import static org.neo4j.driver.internal.security.InternalAuthToken.PARAMETERS_KEY; -import static org.neo4j.driver.internal.security.InternalAuthToken.PRINCIPAL_KEY; -import static org.neo4j.driver.internal.security.InternalAuthToken.REALM_KEY; -import static org.neo4j.driver.internal.security.InternalAuthToken.SCHEME_KEY; -import static org.neo4j.driver.internal.util.Iterables.newHashMapWithSize; -import static org.neo4j.driver.Values.value; - -/** - * This is a listing of the various methods of authentication supported by this - * driver. The scheme used must be supported by the Neo4j Instance you are connecting - * to. - * @see GraphDatabase#driver(String, AuthToken) - * @since 1.0 - */ -public class AuthTokens -{ - /** - * The basic authentication scheme, using a username and a password. 
- * @param username this is the "principal", identifying who this token represents - * @param password this is the "credential", proving the identity of the user - * @return an authentication token that can be used to connect to Neo4j - * @see GraphDatabase#driver(String, AuthToken) - * @throws NullPointerException when either username or password is {@code null} - */ - public static AuthToken basic( String username, String password ) - { - return basic( username, password, null ); - } - - /** - * The basic authentication scheme, using a username and a password. - * @param username this is the "principal", identifying who this token represents - * @param password this is the "credential", proving the identity of the user - * @param realm this is the "realm", specifies the authentication provider - * @return an authentication token that can be used to connect to Neo4j - * @see GraphDatabase#driver(String, AuthToken) - * @throws NullPointerException when either username or password is {@code null} - */ - public static AuthToken basic( String username, String password, String realm ) - { - Objects.requireNonNull( username, "Username can't be null" ); - Objects.requireNonNull( password, "Password can't be null" ); - - Map map = newHashMapWithSize( 4 ); - map.put( SCHEME_KEY, value( "basic" ) ); - map.put( PRINCIPAL_KEY, value( username ) ); - map.put( CREDENTIALS_KEY, value( password ) ); - if ( realm != null ) - { - map.put( REALM_KEY, value( realm ) ); - } - return new InternalAuthToken( map ); - } - - /** - * The kerberos authentication scheme, using a base64 encoded ticket - * @param base64EncodedTicket a base64 encoded service ticket - * @return an authentication token that can be used to connect to Neo4j - * @see GraphDatabase#driver(String, AuthToken) - * @since 1.3 - * @throws NullPointerException when ticket is {@code null} - */ - public static AuthToken kerberos( String base64EncodedTicket ) - { - Objects.requireNonNull( base64EncodedTicket, "Ticket can't be null" ); - - Map map = newHashMapWithSize( 3 ); - map.put( SCHEME_KEY, value( "kerberos" ) ); - map.put( PRINCIPAL_KEY, value( "" ) ); // This empty string is required for backwards compatibility. - map.put( CREDENTIALS_KEY, value( base64EncodedTicket ) ); - return new InternalAuthToken( map ); - } - - /** - * A custom authentication token used for doing custom authentication on the server side. - * @param principal this used to identify who this token represents - * @param credentials this is credentials authenticating the principal - * @param realm this is the "realm:, specifying the authentication provider. - * @param scheme this it the authentication scheme, specifying what kind of authentication that should be used - * @return an authentication token that can be used to connect to Neo4j - * @see GraphDatabase#driver(String, AuthToken) - * @throws NullPointerException when either principal, credentials or scheme is {@code null} - */ - public static AuthToken custom( String principal, String credentials, String realm, String scheme) - { - return custom( principal, credentials, realm, scheme, null ); - } - - /** - * A custom authentication token used for doing custom authentication on the server side. - * @param principal this used to identify who this token represents - * @param credentials this is credentials authenticating the principal - * @param realm this is the "realm:, specifying the authentication provider. 
- * @param scheme this it the authentication scheme, specifying what kind of authentication that should be used - * @param parameters extra parameters to be sent along the authentication provider. - * @return an authentication token that can be used to connect to Neo4j - * @see GraphDatabase#driver(String, AuthToken) - * @throws NullPointerException when either principal, credentials or scheme is {@code null} - */ - public static AuthToken custom( String principal, String credentials, String realm, String scheme, Map parameters) - { - Objects.requireNonNull( principal, "Principal can't be null" ); - Objects.requireNonNull( credentials, "Credentials can't be null" ); - Objects.requireNonNull( scheme, "Scheme can't be null" ); - - Map map = newHashMapWithSize( 5 ); - map.put( SCHEME_KEY, value( scheme ) ); - map.put( PRINCIPAL_KEY, value( principal ) ); - map.put( CREDENTIALS_KEY, value( credentials ) ); - if ( realm != null ) - { - map.put( REALM_KEY, value( realm ) ); - } - if ( parameters != null ) - { - map.put( PARAMETERS_KEY, value( parameters ) ); - } - return new InternalAuthToken( map ); - } - - /** - * No authentication scheme. This will only work if authentication is disabled - * on the Neo4j Instance we are connecting to. - * @return an authentication token that can be used to connect to Neo4j instances with auth disabled - * @see GraphDatabase#driver(String, AuthToken) - */ - public static AuthToken none() - { - return new InternalAuthToken( singletonMap( SCHEME_KEY, value( "none" ) ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Config.java b/src/graiph-driver/java/org/neo4j/driver/Config.java deleted file mode 100644 index a02a6088..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Config.java +++ /dev/null @@ -1,994 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.io.File; -import java.net.InetAddress; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import java.util.logging.Level; - -import org.neo4j.driver.internal.async.pool.PoolSettings; -import org.neo4j.driver.internal.cluster.RoutingSettings; -import org.neo4j.driver.internal.retry.RetrySettings; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.exceptions.SessionExpiredException; -import org.neo4j.driver.exceptions.TransientException; -import org.neo4j.driver.net.ServerAddressResolver; -import org.neo4j.driver.util.Experimental; -import org.neo4j.driver.util.Immutable; -import org.neo4j.driver.util.Resource; - -import static org.neo4j.driver.Config.TrustStrategy.trustAllCertificates; -import static org.neo4j.driver.Logging.javaUtilLogging; - -/** - * A configuration class to config driver properties. - *

- * To build a simple config with custom logging implementation:
- * <pre>
- * {@code
- * Config config = Config
- *                  .build()
- *                  .withLogging(new MyLogging())
- *                  .toConfig();
- * }
- * </pre>
- * <p>
- * To build a more complicated config with tuned connection pool options:
- * <pre>
- * {@code
- * Config config = Config.build()
- *                          .withEncryption()
- *                          .withConnectionTimeout(10, TimeUnit.SECONDS)
- *                          .withMaxConnectionLifetime(30, TimeUnit.MINUTES)
- *                          .withMaxConnectionPoolSize(10)
- *                          .withConnectionAcquisitionTimeout(20, TimeUnit.SECONDS)
- *                          .toConfig();
- * }
- * </pre>
- * - * @since 1.0 - */ -@Immutable -public class Config -{ - /** User defined logging */ - private final Logging logging; - private final boolean logLeakedSessions; - - private final int maxConnectionPoolSize; - - private final long idleTimeBeforeConnectionTest; - private final long maxConnectionLifetimeMillis; - private final long connectionAcquisitionTimeoutMillis; - - /** Indicator for encrypted traffic */ - private final boolean encrypted; - - /** Strategy for how to trust encryption certificate */ - private final TrustStrategy trustStrategy; - - private final int routingFailureLimit; - private final long routingRetryDelayMillis; - private final int connectionTimeoutMillis; - private final RetrySettings retrySettings; - - private final LoadBalancingStrategy loadBalancingStrategy; - private final ServerAddressResolver resolver; - - private final boolean isMetricsEnabled; - - private Config( ConfigBuilder builder ) - { - this.logging = builder.logging; - this.logLeakedSessions = builder.logLeakedSessions; - - this.idleTimeBeforeConnectionTest = builder.idleTimeBeforeConnectionTest; - this.maxConnectionLifetimeMillis = builder.maxConnectionLifetimeMillis; - this.maxConnectionPoolSize = builder.maxConnectionPoolSize; - this.connectionAcquisitionTimeoutMillis = builder.connectionAcquisitionTimeoutMillis; - - this.encrypted = builder.encrypted; - this.trustStrategy = builder.trustStrategy; - this.routingFailureLimit = builder.routingFailureLimit; - this.routingRetryDelayMillis = builder.routingRetryDelayMillis; - this.connectionTimeoutMillis = builder.connectionTimeoutMillis; - this.retrySettings = builder.retrySettings; - this.loadBalancingStrategy = builder.loadBalancingStrategy; - this.resolver = builder.resolver; - - this.isMetricsEnabled = builder.isMetricsEnabled; - } - - /** - * Logging provider - * @return the Logging provider to use - */ - public Logging logging() - { - return logging; - } - - /** - * Check if leaked sessions logging is enabled. - * - * @return {@code true} if enabled, {@code false} otherwise. - */ - public boolean logLeakedSessions() - { - return logLeakedSessions; - } - - /** - * Max number of connections per URL for this driver. - * - * @return the max number of connections - * @deprecated please use {@link #maxConnectionPoolSize()} instead. - */ - @Deprecated - public int connectionPoolSize() - { - return maxConnectionPoolSize; - } - - /** - * Max number of idle connections per URL for this driver. - * - * @return the max number of connections - * @deprecated please use {@link #maxConnectionPoolSize()} instead. - */ - @Deprecated - public int maxIdleConnectionPoolSize() - { - return maxConnectionPoolSize; - } - - /** - * Pooled connections that have been idle in the pool for longer than this timeout - * will be tested before they are used again, to ensure they are still live. - * - * @return idle time in milliseconds - */ - public long idleTimeBeforeConnectionTest() - { - return idleTimeBeforeConnectionTest; - } - - /** - * Pooled connections older than this threshold will be closed and removed from the pool. - * - * @return maximum lifetime in milliseconds - */ - public long maxConnectionLifetimeMillis() - { - return maxConnectionLifetimeMillis; - } - - /** - * @return the configured connection timeout value in milliseconds. 
- */ - public int connectionTimeoutMillis() - { - return connectionTimeoutMillis; - } - - public int maxConnectionPoolSize() - { - return maxConnectionPoolSize; - } - - public long connectionAcquisitionTimeoutMillis() - { - return connectionAcquisitionTimeoutMillis; - } - - /** - * @return the level of encryption required for all connections. - */ - @Deprecated - public EncryptionLevel encryptionLevel() - { - return encrypted ? EncryptionLevel.REQUIRED : EncryptionLevel.NONE; - } - - /** - * @return indicator for encrypted communication. - */ - public boolean encrypted() - { - return encrypted; - } - - /** - * @return the strategy to use to determine the authenticity of an encryption certificate provided by the Neo4j instance we are connecting to. - */ - public TrustStrategy trustStrategy() - { - return trustStrategy; - } - - /** - * Load balancing strategy. - * - * @return the strategy to use. - */ - @Experimental - public LoadBalancingStrategy loadBalancingStrategy() - { - return loadBalancingStrategy; - } - - /** - * Server address resolver. - * - * @return the resolver to use. - */ - public ServerAddressResolver resolver() - { - return resolver; - } - - /** - * Start building a {@link Config} object using a newly created builder. - *

- * Please use {@link #builder()} method instead. - * - * @return a new {@link ConfigBuilder} instance. - */ - public static ConfigBuilder build() - { - return builder(); - } - - /** - * Start building a {@link Config} object using a newly created builder. - * - * @return a new {@link ConfigBuilder} instance. - */ - public static ConfigBuilder builder() - { - return new ConfigBuilder(); - } - - /** - * @return A config with all default settings - */ - public static Config defaultConfig() - { - return Config.builder().build(); - } - - RoutingSettings routingSettings() - { - return new RoutingSettings( routingFailureLimit, routingRetryDelayMillis ); - } - - RetrySettings retrySettings() - { - return retrySettings; - } - - public boolean isMetricsEnabled() - { - return isMetricsEnabled; - } - - /** - * Used to build new config instances - */ - public static class ConfigBuilder - { - private Logging logging = javaUtilLogging( Level.INFO ); - private boolean logLeakedSessions; - private int maxConnectionPoolSize = PoolSettings.DEFAULT_MAX_CONNECTION_POOL_SIZE; - private long idleTimeBeforeConnectionTest = PoolSettings.DEFAULT_IDLE_TIME_BEFORE_CONNECTION_TEST; - private long maxConnectionLifetimeMillis = PoolSettings.DEFAULT_MAX_CONNECTION_LIFETIME; - private long connectionAcquisitionTimeoutMillis = PoolSettings.DEFAULT_CONNECTION_ACQUISITION_TIMEOUT; - private boolean encrypted = true; - private TrustStrategy trustStrategy = trustAllCertificates(); - private LoadBalancingStrategy loadBalancingStrategy = LoadBalancingStrategy.LEAST_CONNECTED; - private int routingFailureLimit = RoutingSettings.DEFAULT.maxRoutingFailures(); - private long routingRetryDelayMillis = RoutingSettings.DEFAULT.retryTimeoutDelay(); - private int connectionTimeoutMillis = (int) TimeUnit.SECONDS.toMillis( 5 ); - private RetrySettings retrySettings = RetrySettings.DEFAULT; - private ServerAddressResolver resolver; - private boolean isMetricsEnabled = false; - - private ConfigBuilder() {} - - /** - * Provide a logging implementation for the driver to use. Java logging framework {@link java.util.logging} with {@link Level#INFO} is used by default. - * Callers are expected to either implement {@link Logging} interface or provide one of the existing implementations available from static factory - * methods in the {@link Logging} interface. - *

- * Please see documentation in {@link Logging} for more information. - * - * @param logging the logging instance to use - * @return this builder - * @see Logging - */ - public ConfigBuilder withLogging( Logging logging ) - { - this.logging = logging; - return this; - } - - /** - * Provide an alternative load balancing strategy for the routing driver to use. By default we use - * {@link LoadBalancingStrategy#LEAST_CONNECTED}. - *

- * Note: We are experimenting with different strategies. This could be removed in the next minor version. - * - * @param loadBalancingStrategy the strategy to use - * @return this builder - */ - @Experimental - public ConfigBuilder withLoadBalancingStrategy( LoadBalancingStrategy loadBalancingStrategy ) - { - this.loadBalancingStrategy = loadBalancingStrategy; - return this; - } - - /** - * Enable logging of leaked sessions. - *

- * Each {@link Session session} is associated with a network connection and thus is a - * {@link Resource resource} that needs to be explicitly closed. Unclosed sessions will result in socket - * leaks and could cause {@link OutOfMemoryError}s. - *

- * Session is considered to be leaked when it is finalized via {@link Object#finalize()} while not being - * closed. This option turns on logging of such sessions and stacktraces of where they were created. - *
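A minimal sketch of enabling this diagnostic (typical usage is an assumption, not part of the patch); because of the finalization overhead noted below, it is usually switched on only in a test configuration:

    // Hypothetical test-only configuration; leak logging adds object finalization overhead.
    Config testConfig = Config.builder()
            .withLeakedSessionsLogging()
            .build();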

- * Note: this option should mostly be used in testing environments for session leak investigations. - * Enabling it will add object finalization overhead. - * - * @return this builder - */ - public ConfigBuilder withLeakedSessionsLogging() - { - this.logLeakedSessions = true; - return this; - } - - /** - * The max number of sessions to keep open at once. Configure this - * higher if you want more concurrent sessions, or lower if you want - * to lower the pressure on the database instance. - *

- * If the driver is asked to provide more sessions than this, it will - * block waiting for another session to be closed, with a timeout. - *

- * Method is deprecated and will forward the given argument to {@link #withMaxConnectionPoolSize(int)}. - * - * @param size the max number of sessions to keep open - * @return this builder - * @deprecated please use a combination of {@link #withMaxConnectionPoolSize(int)} and - * {@link #withConnectionAcquisitionTimeout(long, TimeUnit)} instead. - */ - @Deprecated - public ConfigBuilder withMaxSessions( int size ) - { - return withMaxConnectionPoolSize( size ); - } - - /** - * The max number of idle sessions to keep open at once. Configure this - * higher if you want more concurrent sessions, or lower if you want - * to lower the pressure on the database instance. - *

- * Method is deprecated and will not change the driver configuration. - * - * @param size the max number of idle sessions to keep open - * @return this builder - * @deprecated please use a combination of {@link #withMaxConnectionPoolSize(int)} and - * {@link #withConnectionAcquisitionTimeout(long, TimeUnit)} instead. - */ - @Deprecated - public ConfigBuilder withMaxIdleSessions( int size ) - { - return this; - } - - /** - * The max number of idle connections to keep open at once. Configure this - * higher for greater concurrency, or lower to reduce the pressure on the - * database instance. - *

- * Method is deprecated and will not change the driver configuration. - * - * @param size the max number of idle connections to keep open - * @return this builder - * @deprecated please use a combination of {@link #withMaxConnectionPoolSize(int)} and - * {@link #withConnectionAcquisitionTimeout(long, TimeUnit)} instead. - */ - @Deprecated - public ConfigBuilder withMaxIdleConnections( int size ) - { - return this; - } - - /** - * Please use {@link #withConnectionLivenessCheckTimeout(long, TimeUnit)}. - * - * @param timeout minimum idle time in milliseconds - * @return this builder - * @see #withConnectionLivenessCheckTimeout(long, TimeUnit) - * @deprecated please use {@link #withConnectionLivenessCheckTimeout(long, TimeUnit)} method. This method - * will be removed in future release. - */ - @Deprecated - public ConfigBuilder withSessionLivenessCheckTimeout( long timeout ) - { - return withConnectionLivenessCheckTimeout( timeout, TimeUnit.MILLISECONDS ); - } - - /** - * Pooled connections that have been idle in the pool for longer than this timeout - * will be tested before they are used again, to ensure they are still live. - *

- * If this option is set too low, an additional network call will be - * incurred when acquiring a connection, which causes a performance hit. - *

- * If this is set high, you may receive sessions that are backed by no longer live connections, - * which will lead to exceptions in your application. Assuming the - * database is running, these exceptions will go away if you retry acquiring sessions. - *

- * Hence, this parameter tunes a balance between the likelihood of your - * application seeing connection problems, and performance. - *

- * You normally should not need to tune this parameter. - * No connection liveliness check is done by default. - * Value {@code 0} means connections will always be tested for - * validity and negative values mean connections will never be tested. - * - * @param value the minimum idle time - * @param unit the unit in which the duration is given - * @return this builder - */ - public ConfigBuilder withConnectionLivenessCheckTimeout( long value, TimeUnit unit ) - { - this.idleTimeBeforeConnectionTest = unit.toMillis( value ); - return this; - } - - /** - * Pooled connections older than this threshold will be closed and removed from the pool. Such discarding - * happens during connection acquisition so that new session is never backed by an old connection. - *

- * Setting this option to a low value will cause a high connection churn and might result in a performance hit. - *

- * It is recommended to set maximum lifetime to a slightly smaller value than the one configured in network - * equipment (load balancer, proxy, firewall, etc. can also limit maximum connection lifetime). - *

- * Setting can also be used in combination with {@link #withConnectionLivenessCheckTimeout(long, TimeUnit)}. In - * this case, it is recommended to set liveness check to a value smaller than network equipment has and maximum - * lifetime to a reasonably large value to "renew" connections once in a while. - *
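A rough sketch of the combination described above; the concrete durations are illustrative assumptions, not recommendations:

    // Liveness check shorter than the network equipment's idle cutoff,
    // lifetime large enough that connections are still "renewed" once in a while.
    Config config = Config.builder()
            .withConnectionLivenessCheckTimeout( 4, TimeUnit.MINUTES )
            .withMaxConnectionLifetime( 1, TimeUnit.HOURS )
            .build();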

- * Default maximum connection lifetime is 1 hour. Zero and negative values result in lifetime not being - * checked. - * - * @param value the maximum connection lifetime - * @param unit the unit in which the duration is given - * @return this builder - */ - public ConfigBuilder withMaxConnectionLifetime( long value, TimeUnit unit ) - { - this.maxConnectionLifetimeMillis = unit.toMillis( value ); - return this; - } - - /** - * Configure maximum amount of connections in the connection pool towards a single database. This setting - * limits total amount of connections in the pool when used in direct driver, created for URI with 'bolt' - * scheme. It will limit amount of connections per cluster member when used with routing driver, created for - * URI with 'neo4j' scheme. - *

- * Acquisition will be attempted for at most configured timeout - * {@link #withConnectionAcquisitionTimeout(long, TimeUnit)} when limit is reached. - *

- * Default value is {@code 100}. Negative values are allowed and result in unlimited pool. Value of {@code 0} - * is not allowed. - * - * @param value the maximum connection pool size. - * @return this builder - * @see #withConnectionAcquisitionTimeout(long, TimeUnit) - */ - public ConfigBuilder withMaxConnectionPoolSize( int value ) - { - if ( value == 0 ) - { - throw new IllegalArgumentException( "Zero value is not supported" ); - } - else if ( value < 0 ) - { - this.maxConnectionPoolSize = Integer.MAX_VALUE; - } - else - { - this.maxConnectionPoolSize = value; - } - return this; - } - - /** - * Configure maximum amount of time connection acquisition will attempt to acquire a connection from the - * connection pool. This timeout only kicks in when all existing connections are being used and no new - * connections can be created because maximum connection pool size has been reached. - *

- * Exception is raised when connection can't be acquired within configured time. - *

- * Default value is 60 seconds. Negative values are allowed and result in unlimited acquisition timeout. Value - * of {@code 0} is allowed and results in no timeout and immediate failure when connection is unavailable. - * - * @param value the acquisition timeout - * @param unit the unit in which the duration is given - * @return this builder - * @see #withMaxConnectionPoolSize(int) - */ - public ConfigBuilder withConnectionAcquisitionTimeout( long value, TimeUnit unit ) - { - long valueInMillis = unit.toMillis( value ); - if ( value >= 0 ) - { - this.connectionAcquisitionTimeoutMillis = valueInMillis; - } - else - { - this.connectionAcquisitionTimeoutMillis = -1; - } - return this; - } - - /** - * Configure the {@link EncryptionLevel} to use, use this to control wether the driver uses TLS encryption or not. - * @param level the TLS level to use - * @return this builder - */ - @Deprecated - public ConfigBuilder withEncryptionLevel( EncryptionLevel level ) - { - this.encrypted = level == EncryptionLevel.REQUIRED; - return this; - } - - /** - * Set to use encrypted traffic. - * @return this builder - */ - public ConfigBuilder withEncryption() - { - this.encrypted = true; - return this; - } - - /** - * Set to use unencrypted traffic. - * @return this builder - */ - public ConfigBuilder withoutEncryption() - { - this.encrypted = false; - return this; - } - - /** - * Specify how to determine the authenticity of an encryption certificate provided by the Neo4j instance we are connecting to. - * This defaults to {@link TrustStrategy#trustAllCertificates()}. - * See {@link TrustStrategy#trustCustomCertificateSignedBy(File)} for using certificate signatures instead to verify - * trust. - *

- * This is an important setting to understand, because unless we know that the remote server we have an encrypted connection to - * is really Neo4j, there is no point to encrypt at all, since anyone could pretend to be the remote Neo4j instance. - *

- * For this reason, there is no option to disable trust verification, if you find this cumbersome you should disable encryption using - * - * @param trustStrategy TLS authentication strategy - * @return this builder - */ - public ConfigBuilder withTrustStrategy( TrustStrategy trustStrategy ) - { - this.trustStrategy = trustStrategy; - return this; - } - - /** - * Specify how many times the client should attempt to reconnect to the routing servers before declaring the - * cluster unavailable. - *

- * The routing servers are tried in order. If connecting any of them fails, they are all retried after - * {@linkplain #withRoutingRetryDelay a delay}. This process of retrying all servers is then repeated for the - * number of times specified here before considering the cluster unavailable. - *

- * The default value of this parameter is {@code 1}, which means that the the driver will not re-attempt to - * connect to the cluster when connecting has failed to each individual server in the list of routers. This - * default value is sensible under this assumption that if the attempt to connect fails for all servers, then - * the entire cluster is down, or the client is disconnected from the network, and retrying to connect will - * not bring it back up, in which case it is better to report the failure sooner. - * - * @param routingFailureLimit - * the number of times to retry each server in the list of routing servers - * @return this builder - * @deprecated in 1.2 because driver memorizes seed URI used during construction and falls back to it at - * runtime when all other known router servers failed to respond. Driver is also able to perform DNS lookup - * for the seed URI during rediscovery. This means updates of cluster members will be picked up if they are - * reflected in a DNS record. This configuration allowed driver to retry rediscovery procedure and postpone - * failure. Currently there exists a better way of doing retries via - * {@link Session#readTransaction(TransactionWork)} and {@link Session#writeTransaction(TransactionWork)}. - * Method will be removed in the next major release. - */ - @Deprecated - public ConfigBuilder withRoutingFailureLimit( int routingFailureLimit ) - { - if ( routingFailureLimit < 1 ) - { - throw new IllegalArgumentException( - "The failure limit may not be smaller than 1, but was: " + routingFailureLimit ); - } - this.routingFailureLimit = routingFailureLimit; - return this; - } - - /** - * Specify how long to wait before retrying to connect to a routing server. - *

- * When connecting to all routing servers fails, connecting will be retried after the delay specified here. - * The delay is measured from when the first attempt to connect was made, so that the delay time specifies a - * retry interval. - *

- * For each {@linkplain #withRoutingFailureLimit retry attempt} the delay time will be doubled. The time - * specified here is the base time, i.e. the time to wait before the first retry. If that attempt (on all - * servers) also fails, the delay before the next retry will be double the time specified here, and the next - * attempt after that will be double that, etc. So if, for example, the delay specified here is - * {@code 5 SECONDS}, then after attempting to connect to each server fails, reconnecting will be attempted - * 5 seconds after the first connection attempt to the first server. If that attempt also fails to connect to - * all servers, the next attempt will start 10 seconds after the second attempt started. - *

- * The default value of this parameter is {@code 5 SECONDS}. - * - * @param delay - * the amount of time between attempts to reconnect to the same server - * @param unit - * the unit in which the duration is given - * @return this builder - * @deprecated in 1.2 because driver memorizes seed URI used during construction and falls back to it at - * runtime when all other known router servers failed to respond. Driver is also able to perform DNS lookup - * for the seed URI during rediscovery. This means updates of cluster members will be picked up if they are - * reflected in a DNS record. This configuration allowed driver to retry rediscovery procedure and postpone - * failure. Currently there exists a better way of doing retries via - * {@link Session#readTransaction(TransactionWork)} and {@link Session#writeTransaction(TransactionWork)}. - * Method will be removed in the next major release. - */ - @Deprecated - public ConfigBuilder withRoutingRetryDelay( long delay, TimeUnit unit ) - { - long routingRetryDelayMillis = unit.toMillis( delay ); - if ( routingRetryDelayMillis < 0 ) - { - throw new IllegalArgumentException( String.format( - "The retry delay may not be smaller than 0, but was %d %s.", delay, unit ) ); - } - this.routingRetryDelayMillis = routingRetryDelayMillis; - return this; - } - - /** - * Specify socket connection timeout. - *

- * A timeout of zero is treated as an infinite timeout and will be bound by the timeout configured on the - * operating system level. The connection will block until established or an error occurs. - *

- * Timeout value should be greater or equal to zero and represent a valid {@code int} value when converted to - * {@link TimeUnit#MILLISECONDS milliseconds}. - *

- * The default value of this parameter is {@code 5 SECONDS}. - * - * @param value the timeout duration - * @param unit the unit in which duration is given - * @return this builder - * @throws IllegalArgumentException when given value is negative or does not fit in {@code int} when - * converted to milliseconds. - */ - public ConfigBuilder withConnectionTimeout( long value, TimeUnit unit ) - { - long connectionTimeoutMillis = unit.toMillis( value ); - if ( connectionTimeoutMillis < 0 ) - { - throw new IllegalArgumentException( String.format( - "The connection timeout may not be smaller than 0, but was %d %s.", value, unit ) ); - } - int connectionTimeoutMillisInt = (int) connectionTimeoutMillis; - if ( connectionTimeoutMillisInt != connectionTimeoutMillis ) - { - throw new IllegalArgumentException( String.format( - "The connection timeout must represent int value when converted to milliseconds %d.", - connectionTimeoutMillis ) ); - } - this.connectionTimeoutMillis = connectionTimeoutMillisInt; - return this; - } - - /** - * Specify the maximum time transactions are allowed to retry via - * {@link Session#readTransaction(TransactionWork)} and {@link Session#writeTransaction(TransactionWork)} - * methods. These methods will retry the given unit of work on {@link ServiceUnavailableException}, - * {@link SessionExpiredException} and {@link TransientException} with exponential backoff using initial - * delay of 1 second. - *
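A combined sketch of these two timeouts, using the builder methods documented here (the values are illustrative assumptions):

    Config config = Config.builder()
            .withConnectionTimeout( 10, TimeUnit.SECONDS )        // socket connect timeout
            .withMaxTransactionRetryTime( 15, TimeUnit.SECONDS )  // cap on readTransaction/writeTransaction retries
            .build();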

- * Default value is 30 seconds. - * - * @param value the timeout duration - * @param unit the unit in which duration is given - * @return this builder - * @throws IllegalArgumentException when given value is negative - */ - public ConfigBuilder withMaxTransactionRetryTime( long value, TimeUnit unit ) - { - long maxRetryTimeMs = unit.toMillis( value ); - if ( maxRetryTimeMs < 0 ) - { - throw new IllegalArgumentException( String.format( - "The max retry time may not be smaller than 0, but was %d %s.", value, unit ) ); - } - this.retrySettings = new RetrySettings( maxRetryTimeMs ); - return this; - } - - /** - * Specify a custom server address resolver used by the routing driver to resolve the initial address used to create the driver. - * Such resolution happens: - *

    - *
  • during the very first rediscovery when driver is created
  • when all the known routers from the current routing table have failed and driver needs to fallback to the initial address
- * By default driver performs a DNS lookup for the initial address using {@link InetAddress#getAllByName(String)}. - * - * @param resolver the resolver to use. - * @return this builder. - * @throws NullPointerException when the given resolver is {@code null}. - */ - public ConfigBuilder withResolver( ServerAddressResolver resolver ) - { - this.resolver = Objects.requireNonNull( resolver, "resolver" ); - return this; - } - - /** - * Enable driver metrics. The metrics can be obtained afterwards via {@link Driver#metrics()}. - * @return this builder. - */ - public ConfigBuilder withDriverMetrics() - { - this.isMetricsEnabled = true; - return this; - } - - /** - * Disable driver metrics. When disabled, driver metrics cannot be accessed via {@link Driver#metrics()}. - * @return this builder. - */ - public ConfigBuilder withoutDriverMetrics() - { - this.isMetricsEnabled = false; - return this; - } - - /** - * Create a config instance from this builder. - *

- * Please use {@link #build()} method instead. - * - * @return a new {@link Config} instance. - */ - public Config toConfig() - { - return build(); - } - - /** - * Create a config instance from this builder. - * - * @return a new {@link Config} instance. - */ - public Config build() - { - return new Config( this ); - } - } - - /** - * Control the level of encryption to require - */ - public enum EncryptionLevel - { - /** With this level, the driver will only connect to the server if it can do it without encryption. */ - NONE, - - /** With this level, the driver will only connect to the server it if can do it with encryption. */ - REQUIRED - } - - @Experimental - public enum LoadBalancingStrategy - { - ROUND_ROBIN, - LEAST_CONNECTED - } - - /** - * Control how the driver determines if it can trust the encryption certificates provided by the Neo4j instance it is connected to. - */ - public static class TrustStrategy - { - /** - * The trust strategy that the driver supports - */ - public enum Strategy - { - @Deprecated - TRUST_ON_FIRST_USE, - - @Deprecated - TRUST_SIGNED_CERTIFICATES, - - TRUST_ALL_CERTIFICATES, - - TRUST_CUSTOM_CA_SIGNED_CERTIFICATES, - - TRUST_SYSTEM_CA_SIGNED_CERTIFICATES - } - - private final Strategy strategy; - private final File certFile; - private boolean hostnameVerificationEnabled; - - private TrustStrategy( Strategy strategy ) - { - this( strategy, null ); - } - - private TrustStrategy( Strategy strategy, File certFile ) - { - this.strategy = strategy; - this.certFile = certFile; - } - - /** - * Return the strategy type desired. - * - * @return the strategy we should use - */ - public Strategy strategy() - { - return strategy; - } - - /** - * Return the configured certificate file. - * - * @return configured certificate or {@code null} if trust strategy does not require a certificate. - */ - public File certFile() - { - return certFile; - } - - /** - * Check if hostname verification is enabled for this trust strategy. - * - * @return {@code true} if hostname verification has been enabled via {@link #withHostnameVerification()}, {@code false} otherwise. - */ - public boolean isHostnameVerificationEnabled() - { - return hostnameVerificationEnabled; - } - - /** - * Enable hostname verification for this trust strategy. - * - * @return the current trust strategy. - */ - public TrustStrategy withHostnameVerification() - { - hostnameVerificationEnabled = true; - return this; - } - - /** - * Disable hostname verification for this trust strategy. - * - * @return the current trust strategy. - */ - public TrustStrategy withoutHostnameVerification() - { - hostnameVerificationEnabled = false; - return this; - } - - /** - * Use {@link #trustCustomCertificateSignedBy(File)} instead. - * - * @param certFile the trusted certificate file - * @return an authentication config - */ - @Deprecated - public static TrustStrategy trustSignedBy( File certFile ) - { - return new TrustStrategy( Strategy.TRUST_SIGNED_CERTIFICATES, certFile ); - } - - /** - * Only encrypted connections to Neo4j instances with certificates signed by a trusted certificate will be accepted. - * The file specified should contain one or more trusted X.509 certificates. - *
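Wiring such a custom-CA strategy into the configuration might look like the following sketch; the certificate path is a placeholder assumption, and File is java.io.File:

    // "/etc/neo4j/trusted-ca.pem" is a hypothetical path to a PEM file holding trusted certificates.
    Config config = Config.builder()
            .withEncryption()
            .withTrustStrategy( Config.TrustStrategy.trustCustomCertificateSignedBy( new File( "/etc/neo4j/trusted-ca.pem" ) ) )
            .build();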

- * The certificate(s) in the file must be encoded using PEM encoding, meaning the certificates in the file should be encoded using Base64, - * and each certificate is bounded at the beginning by "-----BEGIN CERTIFICATE-----", and bounded at the end by "-----END CERTIFICATE-----". - * - * @param certFile the trusted certificate file - * @return an authentication config - */ - public static TrustStrategy trustCustomCertificateSignedBy( File certFile ) - { - return new TrustStrategy( Strategy.TRUST_CUSTOM_CA_SIGNED_CERTIFICATES, certFile ); - } - - /** - * Trust strategy for certificates that can be verified through the local system store. - * - * @return an authentication config - */ - public static TrustStrategy trustSystemCertificates() - { - return new TrustStrategy( Strategy.TRUST_SYSTEM_CA_SIGNED_CERTIFICATES ); - } - - /** - * Trust strategy for certificates that can be verified through the local system store. - * - * @return an authentication config - * @since 1.1 - */ - public static TrustStrategy trustAllCertificates() - { - return new TrustStrategy( Strategy.TRUST_ALL_CERTIFICATES ); - } - - /** - * Automatically trust a Neo4j instance the first time we see it - but fail to connect if its encryption certificate ever changes. - * This is similar to the mechanism used in SSH, and protects against man-in-the-middle attacks that occur after the initial setup of your application. - *

- * Known Neo4j hosts are recorded in a file, {@code certFile}. - * Each time we reconnect to a known host, we verify that its certificate remains the same, guarding against attackers intercepting our communication. - *

- * Note that this approach is vulnerable to man-in-the-middle attacks the very first time you connect to a new Neo4j instance. - * If you do not trust the network you are connecting over, consider using {@link #trustCustomCertificateSignedBy(File)} signed certificates} instead, or manually adding the - * trusted host line into the specified file. - * - * @param knownHostsFile a file where known certificates are stored. - * @return an authentication config - * - * @deprecated in 1.1 in favour of {@link #trustAllCertificates()} - */ - @Deprecated - public static TrustStrategy trustOnFirstUse( File knownHostsFile ) - { - return new TrustStrategy( Strategy.TRUST_ON_FIRST_USE, knownHostsFile ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/ConnectionPoolMetrics.java b/src/graiph-driver/java/org/neo4j/driver/ConnectionPoolMetrics.java deleted file mode 100644 index 15e6f502..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/ConnectionPoolMetrics.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.concurrent.TimeUnit; - -public interface ConnectionPoolMetrics -{ - enum PoolStatus - { - OPEN, CLOSED - } - - /** - * An unique name that identifies this connection pool metrics among all others - * @return An unique name - */ - String id(); - - /** - * The status of the pool. - * @return The status of the pool. - */ - PoolStatus poolStatus(); - - /** - * The amount of connections that are currently in-use (borrowed out of the pool). - * @return The amount of connections that are currently in-use - */ - int inUse(); - - /** - * The amount of connections that are currently idle (buffered inside the pool). - * @return The amount of connections that are currently idle. - */ - int idle(); - - /** - * The amount of connections that are currently waiting to be created. - * The amount is increased by one when the pool noticed a request to create a new connection. - * The amount is decreased by one when the pool noticed a new connection is created successfully or failed to create. - * @return The amount of connections that are waiting to be created. - */ - int creating(); - - /** - * An increasing-only number to record how many connections have been created by this pool successfully since the pool is created. - * @return The amount of connections have ever been created by this pool. - */ - long created(); - - /** - * An increasing-only number to record how many connections have been failed to create. - * @return The amount of connections have been failed to create by this pool. - */ - long failedToCreate(); - - /** - * An increasing-only number to record how many connections have been closed by this pool. - * @return The amount of connections have been closed by this pool. 
- */ - long closed(); - - /** - * The current count of application requests to wait for acquiring a connection from the pool. - * The reason to wait could be waiting for creating a new connection, or waiting for a connection to be free by application when the pool is full. - * @return The current amount of application request to wait for acquiring a connection from the pool. - */ - int acquiring(); - - /** - * An increasing-only number to record how many connections have been acquired from the pool since the pool is created. - * The connections acquired could hold either a newly created connection or a reused connection from the pool. - * @return The amount of connections that have been acquired from the pool. - */ - long acquired(); - - /** - * An increasing-only number to record how many times that we've failed to acquire a connection from the pool within configured maximum acquisition timeout - * set by {@link Config.ConfigBuilder#withConnectionAcquisitionTimeout(long, TimeUnit)}. - * The connection acquired could hold either a newly created connection or a reused connection from the pool. - * @return The amount of failures to acquire a connection from the pool within maximum connection acquisition timeout. - */ - long timedOutToAcquire(); - - /** - * The total acquisition time in milliseconds of all connection acquisition requests since the pool is created. - * See {@link ConnectionPoolMetrics#acquired()} for the total amount of connection acquired since the driver is created. - * The average acquisition time can be calculated using the code bellow: - *

Example

- *
-     * {@code
-     * ConnectionPoolMetrics previous = ConnectionPoolMetrics.EMPTY;
-     * ...
-     * ConnectionPoolMetrics current = poolMetrics.snapshot();
-     * double average = computeAverage(current.totalAcquisitionTime(), previous.totalAcquisitionTime(), current.acquired(), previous.acquired());
-     * previous = current;
-     * ...
-     *
-     * private static double computeAverage(double currentSum, double previousSum, double currentCount, double previousCount)
-     * {
-     *     return (currentSum-previousSum)/(currentCount-previousCount);
-     * }
-     * }
-     * 
- * @return The total acquisition time since the driver is created. - */ - long totalAcquisitionTime(); - - /** - * The total time in milliseconds spent to establishing new socket connections since the pool is created. - * See {@link ConnectionPoolMetrics#created()} for the total amount of connections established since the pool is created. - * The average connection time can be calculated using the code bellow: - *

Example

- *
-     * {@code
-     * ConnectionPoolMetrics previous = ConnectionPoolMetrics.EMPTY;
-     * ...
-     * ConnectionPoolMetrics current = poolMetrics.snapshot();
-     * double average = computeAverage(current.totalConnectionTime(), previous.totalConnectionTime(), current.created(), previous.created());
-     * previous = current;
-     * ...
-     *
-     * private static double computeAverage(double currentSum, double previousSum, double currentCount, double previousCount)
-     * {
-     *     return (currentSum-previousSum)/(currentCount-previousCount);
-     * }
-     * }
-     * 
- * @return The total connection time since the driver is created. - */ - long totalConnectionTime(); - - /** - * The total time in milliseconds connections are borrowed out of the pool, such as the time spent in user's application code to run cypher queries. - * See {@link ConnectionPoolMetrics#totalInUseCount()} for the total amount of connections that are borrowed out of the pool. - * The average in-use time can be calculated using the code bellow: - *

Example

- *
-     * {@code
-     * ConnectionPoolMetrics previous = ConnectionPoolMetrics.EMPTY;
-     * ...
-     * ConnectionPoolMetrics current = poolMetrics.snapshot();
-     * double average = computeAverage(current.totalInUseTime(), previous.totalInUseTime(), current.totalInUseCount(), previous.totalInUseCount());
-     * previous = current;
-     * ...
-     *
-     * private static double computeAverage(double currentSum, double previousSum, double currentCount, double previousCount)
-     * {
-     *     return (currentSum-previousSum)/(currentCount-previousCount);
-     * }
-     * }
-     * 
- * @return the total time connections are used outside the pool. - */ - long totalInUseTime(); - - /** - * The total amount of connections that are borrowed outside the pool since the pool is created. - * @return the total amount of connection that are borrowed outside the pool. - */ - long totalInUseCount(); - - /** - * Returns a snapshot of this connection pool metrics. - * @return a snapshot of this connection pool metrics. - */ - ConnectionPoolMetrics snapshot(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Driver.java b/src/graiph-driver/java/org/neo4j/driver/Driver.java deleted file mode 100644 index 9c81ed8e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Driver.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; - -import org.neo4j.driver.async.AsyncSession; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.internal.SessionParameters; -import org.neo4j.driver.reactive.RxSession; - -/** - * Accessor for a specific Neo4j graph database. - *

- * Driver implementations are typically thread-safe, act as a template - * for session creation and host a connection pool. All configuration - * and authentication settings are held immutably by the Driver. Should - * different settings be required, a new Driver instance should be created. - *

- * A driver maintains a connection pool for each remote Neo4j server. Therefore - * the most efficient way to make use of a Driver is to use the same instance - * across the application. - *

- * To construct a new Driver, use one of the - * {@link GraphDatabase#driver(String, AuthToken) GraphDatabase.driver} methods. - * The URI passed to - * this method determines the type of Driver created. - *
 Available schemes and drivers:
   URI scheme | Driver
   bolt       | Direct driver: connects directly to the host and port specified in the URI.
   neo4j      | Routing driver: can automatically discover members of a Causal Cluster and route {@link Session sessions} based on {@link AccessMode}.
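For illustration, the two URI schemes from the table above select the driver type; the hosts and credentials below are placeholder assumptions:

    Driver direct  = GraphDatabase.driver( "bolt://localhost:7687",
                                           AuthTokens.basic( "neo4j", "password" ) );        // direct driver
    Driver routing = GraphDatabase.driver( "neo4j://cluster.example.com:7687",
                                           AuthTokens.basic( "neo4j", "password" ) );        // routing driver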
- * - * @since 1.0 (Modified and Added {@link AsyncSession} and {@link RxSession} since 2.0) - */ -public interface Driver extends AutoCloseable -{ - /** - * Return a flag to indicate whether or not encryption is used for this driver. - * - * @return true if the driver requires encryption, false otherwise - */ - boolean isEncrypted(); - - /** - * Create a new general purpose {@link Session} with default {@link SessionParameters session parameters}. - *

- * Alias to {@link #session(Consumer)}}. - * - * @return a new {@link Session} object. - */ - Session session(); - - /** - * Create a new {@link Session} with a specified {@link SessionParametersTemplate}. - * @param templateConsumer specifies how the session parameter shall be built for this session. - * @return a new {@link Session} object. - * @see SessionParameters - */ - Session session( Consumer templateConsumer ); - /** - * Close all the resources assigned to this driver, including open connections and IO threads. - *

- * This operation works the same way as {@link #closeAsync()} but blocks until all resources are closed. - */ - @Override - void close(); - - /** - * Close all the resources assigned to this driver, including open connections and IO threads. - *

- * This operation is asynchronous and returns a {@link CompletionStage}. This stage is completed with - * {@code null} when all resources are closed. It is completed exceptionally if termination fails. - * - * @return a {@link CompletionStage completion stage} that represents the asynchronous close. - */ - CompletionStage closeAsync(); - - /** - * Returns the driver metrics if metrics reporting is enabled via {@link Config.ConfigBuilder#withDriverMetrics()}. - * Otherwise a {@link ClientException} will be thrown. - * @return the driver metrics if enabled. - * @throws ClientException if the driver metrics reporting is not enabled. - */ - Metrics metrics(); - - /** - * Create a new general purpose {@link RxSession} with default {@link SessionParameters session parameters}. - * The {@link RxSession} provides a reactive way to run queries and process results. - *

- * Alias to {@link #rxSession(Consumer)}}. - * - * @return @return a new {@link RxSession} object. - */ - RxSession rxSession(); - - /** - * Create a new {@link RxSession} with a specified {@link SessionParametersTemplate}. - * The {@link RxSession} provides a reactive way to run queries and process results. - * @param templateConsumer used to customize the session parameters. - * @return @return a new {@link RxSession} object. - */ - RxSession rxSession( Consumer templateConsumer ); - - /** - * Create a new general purpose {@link AsyncSession} with default {@link SessionParameters session parameters}. - * The {@link AsyncSession} provides an asynchronous way to run queries and process results. - *

- * Alias to {@link #asyncSession(Consumer)}}. - * - * @return @return a new {@link AsyncSession} object. - */ - AsyncSession asyncSession(); - - /** - * Create a new {@link AsyncSession} with a specified {@link SessionParametersTemplate}. - * The {@link AsyncSession} provides an asynchronous way to run queries and process results. - * - * @param templateConsumer used to customize the session parameters. - * @return a new {@link AsyncSession} object. - */ - AsyncSession asyncSession( Consumer templateConsumer ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Logger.java b/src/graiph-driver/java/org/neo4j/driver/Logger.java deleted file mode 100644 index 747915ac..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Logger.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -/** - * Logs messages for driver activity. - *

- * Some methods in this interface take a message template together with a list of parameters. These methods are expected to construct the final - * message only if the needed logging level is enabled. Driver expects formatting to be done using {@link String#format(String, Object...)} method. - * Thus all supplied message templates will contain "%s" as parameter placeholders. This is different from all SLF4J-compatible logging frameworks - * where parameter placeholder is "{}". Implementations of this interface should adapt placeholders from "%s" to "{}", if required. - */ -public interface Logger -{ - /** - * Logs errors from this driver. - *

- * Examples of errors logged using this method: - *

    - *
  • Network connection errors
  • DNS resolution errors
  • Cluster discovery errors
- * - * @param message the error message. - * @param cause the cause of the error. - */ - void error( String message, Throwable cause ); - - /** - * Logs information from the driver. - *

- * Example of info messages logged using this method: - *

    - *
  • Driver creation and shutdown
  • Cluster discovery progress
- * - * @param message the information message template. Can contain {@link String#format(String, Object...)}-style placeholders, like "%s". - * @param params parameters used in the information message. - */ - void info( String message, Object... params ); - - /** - * Logs warnings that happened when using the driver. - *

- * Example of warning messages logged using this method: - *

    - *
  • Usage of deprecated APIs
  • Transaction retry failures
- * - * @param message the warning message template. Can contain {@link String#format(String, Object...)}-style placeholders, like "%s". - * @param params parameters used in the warning message. - */ - void warn( String message, Object... params ); - - /** - * Logs warnings that happened during using the driver - * - *

- * Example of warning messages logged using this method: - *

    - *
  • Usage of deprecated APIs
  • Transaction retry failures
- * - * @param message the warning message - * @param cause the cause of the warning - */ - void warn( String message, Throwable cause ); - - /** - * Logs bolt messages sent and received by this driver. - * It is only enabled when {@link Logger#isDebugEnabled()} returns {@code true}. - * This logging level generates a lot of log entries. - *

- * Example of debug messages logged using this method: - *

    - *
  • Connection pool events, like creation, acquire and release of connections
  • Messages sent to the database
  • Messages received from the database
- * - * @param message the debug message template. Can contain {@link String#format(String, Object...)}-style placeholders, like "%s". - * @param params parameters used in generating the bolt message - */ - void debug( String message, Object... params ); - - /** - * Logs binary sent and received by this driver. - * It is only enabled when {@link Logger#isTraceEnabled()} returns {@code true}. - * This logging level generates huge amount of log entries. - * - *

- * Example of trace messages logged using this method: - *

    - *
  • Idle connection pings
  • Server selection for load balancing
  • Messages sent to the database with bytes in hex
  • Messages received from the database with bytes in hex
- * - * @param message the trace message template. Can contain {@link String#format(String, Object...)}-style placeholders, like "%s". - * @param params parameters used in generating the hex message - */ - void trace( String message, Object... params ); - - /** - * Return true if the trace logging level is enabled. - * - * @return true if the trace logging level is enabled. - * @see Logger#trace(String, Object...) - */ - boolean isTraceEnabled(); - - /** - * Return true if the debug level is enabled. - * - * @return true if the debug level is enabled. - * @see Logger#debug(String, Object...) - */ - boolean isDebugEnabled(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Logging.java b/src/graiph-driver/java/org/neo4j/driver/Logging.java deleted file mode 100644 index 5b4ec20e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Logging.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.logging.ConsoleHandler; -import java.util.logging.Level; - -import org.neo4j.driver.internal.logging.ConsoleLogging; -import org.neo4j.driver.internal.logging.JULogging; -import org.neo4j.driver.internal.logging.Slf4jLogging; - -import static org.neo4j.driver.internal.logging.DevNullLogging.DEV_NULL_LOGGING; - -/** - * Accessor for {@link Logger} instances. Configured once for a driver instance using {@link Config.ConfigBuilder#withLogging(Logging)} builder method. - * Users are expected to either implement this interface or use one of the existing implementations (available via static methods in this interface): - *
    - *
  • {@link #slf4j() SLF4J logging} - uses the available SLF4J binding (Logback, Log4j, etc.); fails when no SLF4J implementation is available. Uses the application's logging configuration from XML or another type of configuration file. This logging method is the preferred one and relies on the SLF4J implementation available in the classpath or modulepath.
  • {@link #javaUtilLogging(Level) Java Logging API (JUL)} - uses {@link java.util.logging.Logger} created via {@link java.util.logging.Logger#getLogger(String)}. Global java.util.logging configuration applies. This logging method is suitable when the application uses JUL for logging and explicitly configures it.
  • {@link #console(Level) Console logging} - uses {@link ConsoleHandler} with the specified {@link Level logging level} to print messages to {@code System.err}. This logging method is suitable for quick debugging or prototyping.
  • {@link #none() No logging} - implementation that discards all logged messages. This logging method is suitable for testing to make the driver produce no output.
- *

- * Driver logging API defines the following log levels: ERROR, INFO, WARN, DEBUG and TRACE. They are similar to levels defined by SLF4J but different from - * log levels defined for {@link java.util.logging}. The following mapping takes place:
 Driver and JUL log levels:
   Driver | java.util.logging
   ERROR  | SEVERE
   INFO   | INFO, CONFIG
   WARN   | WARNING
   DEBUG  | FINE, FINER
   TRACE  | FINEST
- *

- * Example of driver configuration with SLF4J logging: - *

- * {@code
- * Driver driver = GraphDatabase.driver("bolt://localhost:7687",
- *                                         AuthTokens.basic("neo4j", "password"),
- *                                         Config.build().withLogging(Logging.slf4j()).toConfig());
- * }
- * 
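A console-logging variant of the same configuration is a reasonable sketch, using the console(Level) factory declared below and assuming java.util.logging.Level is imported (FINE maps to the driver's DEBUG level per the table above):

    Driver driver = GraphDatabase.driver( "bolt://localhost:7687",
                                          AuthTokens.basic( "neo4j", "password" ),
                                          Config.builder().withLogging( Logging.console( Level.FINE ) ).build() );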
- * - * @see Logger - * @see Config.ConfigBuilder#withLogging(Logging) - */ -public interface Logging -{ - /** - * Obtain a {@link Logger} instance by name. - * - * @param name name of a {@link Logger} - * @return {@link Logger} instance - */ - Logger getLog( String name ); - - /** - * Create logging implementation that uses SLF4J. - * - * @return new logging implementation. - * @throws IllegalStateException if SLF4J is not available. - */ - static Logging slf4j() - { - RuntimeException unavailabilityError = Slf4jLogging.checkAvailability(); - if ( unavailabilityError != null ) - { - throw unavailabilityError; - } - return new Slf4jLogging(); - } - - /** - * Create logging implementation that uses {@link java.util.logging}. - * - * @param level the log level. - * @return new logging implementation. - */ - static Logging javaUtilLogging( Level level ) - { - return new JULogging( level ); - } - - /** - * Create logging implementation that uses {@link java.util.logging} to log to {@code System.err}. - * - * @param level the log level. - * @return new logging implementation. - */ - static Logging console( Level level ) - { - return new ConsoleLogging( level ); - } - - /** - * Create logging implementation that discards all messages and logs nothing. - * - * @return new logging implementation. - */ - static Logging none() - { - return DEV_NULL_LOGGING; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Metrics.java b/src/graiph-driver/java/org/neo4j/driver/Metrics.java deleted file mode 100644 index 7096be1e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Metrics.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.Map; - -public interface Metrics -{ - /** - * A map of connection pool metrics. - * The {@link ConnectionPoolMetrics#id()} are used as the keys of the map. - * @return The connection pool metrics. - */ - Map connectionPoolMetrics(); - - /** - * Returns a snapshot of this metrics. - * @return a snapshot of this metrics. - */ - Metrics snapshot(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Record.java b/src/graiph-driver/java/org/neo4j/driver/Record.java deleted file mode 100644 index 2e63549f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Record.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.internal.value.NullValue; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.exceptions.NoSuchRecordException; -import org.neo4j.driver.types.MapAccessorWithDefaultValue; -import java.util.function.Function; -import org.neo4j.driver.util.Immutable; -import org.neo4j.driver.util.Pair; - -/** - * Container for Cypher result values. - *

- * Streams of records are returned from Cypher statement execution, contained - * within a {@link StatementResult}. - *
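As the next paragraph explains, values can be read either by positional index or by textual key; a minimal sketch, where 'record' is assumed to be one record taken from an executed statement's result:

    Value byKey   = record.get( "name" );   // access by textual key
    Value byIndex = record.get( 0 );        // access by positional index
    Map<String, Object> plain = record.asMap();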

- * A record is a form of ordered map and, as such, contained values can be - * accessed by either positional {@link #get(int) index} or textual - * {@link #get(String) key}. - * - * @since 1.0 - */ -@Immutable -public interface Record extends MapAccessorWithDefaultValue -{ - /** - * Retrieve the keys of the underlying map - * - * @return all field keys in order - */ - List keys(); - - /** - * Retrieve the values of the underlying map - * - * @return all field keys in order - */ - List values(); - - /** - * Check if the list of keys contains the given key - * - * @param key the key - * @return {@code true} if this map keys contains the given key otherwise {@code false} - */ - boolean containsKey( String key ); - - /** - * Retrieve the index of the field with the given key - * - * @throws java.util.NoSuchElementException if the given key is not from {@link #keys()} - * @param key the give key - * @return the index of the field as used by {@link #get(int)} - */ - int index( String key ); - - /** - * Retrieve the value of the property with the given key - * - * @param key the key of the property - * @return the property's value or a {@link NullValue} if no such key exists - * @throws NoSuchRecordException if the associated underlying record is not available - */ - Value get( String key ); - - /** - * Retrieve the value at the given field index - * - * @param index the index of the value - * @return the value or a {@link org.neo4j.driver.internal.value.NullValue} if the index is out of bounds - * @throws ClientException if record has not been initialized - */ - Value get( int index ); - - /** - * Retrieve the number of fields in this record - * - * @return the number of fields in this record - */ - int size(); - - /** - * Return this record as a map, where each value has been converted to a default - * java object using {@link Value#asObject()}. - * - * This is equivalent to calling {@link #asMap(Function)} with {@link Values#ofObject()}. - * - * @return this record as a map - */ - Map asMap(); - - /** - * Return this record as a map, where each value has been converted using the provided - * mapping function. You can find a library of common mapping functions in {@link Values}. - * - * @see Values for a long list of built-in conversion functions - * @param mapper the mapping function - * @param the type to convert to - * @return this record as a map - */ - Map asMap( Function mapper ); - - /** - * Retrieve all record fields - * - * @return all fields in key order - * @throws NoSuchRecordException if the associated underlying record is not available - */ - List> fields(); - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Records.java b/src/graiph-driver/java/org/neo4j/driver/Records.java deleted file mode 100644 index d71b8241..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Records.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.function.Function; - -/** - * Static utility methods for retaining records - * - * @see StatementResult#list() - * @since 1.0 - */ -public abstract class Records -{ - public static Function column( int index ) - { - return column( index, Values.ofValue() ); - } - - public static Function column( String key ) - { - return column( key, Values.ofValue() ); - } - - public static Function column( final int index, final Function mapFunction ) - { - return new Function() - { - @Override - public T apply( Record record ) - { - return mapFunction.apply( record.get( index ) ); - } - }; - } - public static Function column( final String key, final Function mapFunction ) - { - return new Function() - { - @Override - public T apply( Record recordAccessor ) - { - return mapFunction.apply( recordAccessor.get( key ) ); - } - }; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Session.java b/src/graiph-driver/java/org/neo4j/driver/Session.java deleted file mode 100644 index d65fbf3c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Session.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.Map; - -import org.neo4j.driver.async.AsyncSession; -import org.neo4j.driver.util.Resource; - -/** - * Provides a context of work for database interactions. - *

- * A Session hosts a series of {@linkplain Transaction transactions} - * carried out against a database. Within the database, all statements are - * carried out within a transaction. Within application code, however, it is - * not always necessary to explicitly {@link #beginTransaction() begin a - * transaction}. If a statement is {@link #run} directly against a {@link - * Session}, the server will automatically BEGIN and - * COMMIT that statement within its own transaction. This type - * of transaction is known as an autocommit transaction. - *
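For instance, a single statement can be sent in its own auto-commit transaction (a sketch, assuming an open session):

    // Auto-commit: the server wraps this one statement in its own transaction.
    StatementResult result = session.run(
            "CREATE (p:Person {name: $name}) RETURN id(p)",
            Values.parameters( "name", "Alice" ) );
    result.consume();   // consuming the result guarantees the write has completed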

- * Explicit transactions allow multiple statements to be committed as part of - * a single atomic operation and can be rolled back if necessary. They can also - * be used to ensure causal consistency, meaning that an application - * can run a series of queries on different members of a cluster, while - * ensuring that each query sees the state of graph at least as up-to-date as - * the graph seen by the previous query. For more on causal consistency, see - * the Neo4j clustering manual. - *

- * Typically, a session will acquire a TCP connection to execute query or - * transaction. Such a connection will be acquired from a connection pool - * and released back there when query result is consumed or transaction is - * committed or rolled back. One connection can therefore be adopted by many - * sessions, although by only one at a time. Application code should never need - * to deal directly with connection management. - *

- * A session inherits its destination address and permissions from its - * underlying connection. This means that for a single query/transaction one - * session may only ever target one machine within a cluster and does not - * support re-authentication. To achieve otherwise requires creation of a - * separate session. - *

- * Similarly, multiple sessions should be used when working with concurrency; - * session implementations are not thread safe. - * - * @since 1.0 (Removed async API to {@link AsyncSession} in 2.0) - */ -public interface Session extends Resource, StatementRunner -{ - /** - * Begin a new explicit {@linkplain Transaction transaction}. At - * most one transaction may exist in a session at any point in time. To - * maintain multiple concurrent transactions, use multiple concurrent - * sessions. - * - * @return a new {@link Transaction} - */ - Transaction beginTransaction(); - - /** - * Begin a new explicit {@linkplain Transaction transaction} with the specified {@link TransactionConfig configuration}. - * At most one transaction may exist in a session at any point in time. To - * maintain multiple concurrent transactions, use multiple concurrent - * sessions. - * - * @param config configuration for the new transaction. - * @return a new {@link Transaction} - */ - Transaction beginTransaction( TransactionConfig config ); - - /** - * Execute given unit of work in a {@link AccessMode#READ read} transaction. - *

- * Transaction will automatically be committed unless exception is thrown from the unit of work itself or from - * {@link Transaction#close()} or transaction is explicitly marked for failure via {@link Transaction#failure()}. - * - * @param work the {@link TransactionWork} to be applied to a new read transaction. - * @param the return type of the given unit of work. - * @return a result as returned by the given unit of work. - */ - T readTransaction( TransactionWork work ); - - /** - * Execute given unit of work in a {@link AccessMode#READ read} transaction with the specified {@link TransactionConfig configuration}. - *

- * Transaction will automatically be committed unless exception is thrown from the unit of work itself or from - * {@link Transaction#close()} or transaction is explicitly marked for failure via {@link Transaction#failure()}. - * - * @param work the {@link TransactionWork} to be applied to a new read transaction. - * @param config configuration for all transactions started to execute the unit of work. - * @param the return type of the given unit of work. - * @return a result as returned by the given unit of work. - */ - T readTransaction( TransactionWork work, TransactionConfig config ); - - /** - * Execute given unit of work in a {@link AccessMode#WRITE write} transaction. - *
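A hedged sketch of the transaction-function style documented here, assuming an open session; in the stock Neo4j driver such units of work may be retried, so they are normally written to be idempotent:

    // Read via a transaction function; commit happens automatically on success.
    long count = session.readTransaction( tx ->
            tx.run( "MATCH (p:Person) RETURN count(p)" ).single().get( 0 ).asLong() );

    // Write via a transaction function; returning null is allowed when there is no result.
    session.writeTransaction( tx ->
    {
        tx.run( "CREATE (p:Person {name: $name})", Values.parameters( "name", "Bob" ) );
        return null;
    } );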

- * Transaction will automatically be committed unless exception is thrown from the unit of work itself or from - * {@link Transaction#close()} or transaction is explicitly marked for failure via {@link Transaction#failure()}. - * - * @param work the {@link TransactionWork} to be applied to a new write transaction. - * @param the return type of the given unit of work. - * @return a result as returned by the given unit of work. - */ - T writeTransaction( TransactionWork work ); - - /** - * Execute given unit of work in a {@link AccessMode#WRITE write} transaction with the specified {@link TransactionConfig configuration}. - *

- * Transaction will automatically be committed unless exception is thrown from the unit of work itself or from - * {@link Transaction#close()} or transaction is explicitly marked for failure via {@link Transaction#failure()}. - * - * @param work the {@link TransactionWork} to be applied to a new write transaction. - * @param config configuration for all transactions started to execute the unit of work. - * @param the return type of the given unit of work. - * @return a result as returned by the given unit of work. - */ - T writeTransaction( TransactionWork work, TransactionConfig config ); - - /** - * Run a statement in an auto-commit transaction with the specified {@link TransactionConfig configuration} and return a result stream. - * - * @param statement text of a Neo4j statement. - * @param config configuration for the new transaction. - * @return a stream of result values and associated metadata. - */ - StatementResult run( String statement, TransactionConfig config ); - - /** - * Run a statement with parameters in an auto-commit transaction with specified {@link TransactionConfig configuration} and return a result stream. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This version of run takes a {@link Map} of parameters. The values in the map - * must be values that can be converted to Neo4j types. See {@link Values#parameters(Object...)} for - * a list of allowed types. - * - *

Example

- *
-     * {@code
-     * Map<String, Object> metadata = new HashMap<>();
-     * metadata.put("type", "update name");
-     *
-     * TransactionConfig config = TransactionConfig.builder()
-     *                 .withTimeout(Duration.ofSeconds(3))
-     *                 .withMetadata(metadata)
-     *                 .build();
-     *
-     * Map<String, Object> parameters = new HashMap<>();
-     * parameters.put("myNameParam", "Bob");
-     *
-     * StatementResult cursor = session.run("MATCH (n) WHERE n.name = {myNameParam} RETURN (n)", parameters, config);
-     * }
-     * 
- * - * @param statement text of a Neo4j statement. - * @param parameters input data for the statement. - * @param config configuration for the new transaction. - * @return a stream of result values and associated metadata. - */ - StatementResult run( String statement, Map parameters, TransactionConfig config ); - - /** - * Run a statement in an auto-commit transaction with specified {@link TransactionConfig configuration} and return a result stream. - *

Example

- *
-     * {@code
-     * Map<String, Object> metadata = new HashMap<>();
-     * metadata.put("type", "update name");
-     *
-     * TransactionConfig config = TransactionConfig.builder()
-     *                 .withTimeout(Duration.ofSeconds(3))
-     *                 .withMetadata(metadata)
-     *                 .build();
-     *
-     * Statement statement = new Statement("MATCH (n) WHERE n.name=$myNameParam RETURN n.age");
-     * StatementResult cursor = session.run(statement.withParameters(Values.parameters("myNameParam", "Bob")));
-     * }
-     * 
- * - * @param statement a Neo4j statement. - * @param config configuration for the new transaction. - * @return a stream of result values and associated metadata. - */ - StatementResult run( Statement statement, TransactionConfig config ); - - /** - * Return the bookmark received following the last completed - * {@linkplain Transaction transaction}. If no bookmark was received - * or if this transaction was rolled back, the bookmark value will - * be null. - * - * @return a reference to a previous transaction - */ - String lastBookmark(); - - /** - * Reset the current session. This sends an immediate RESET signal to the server which both interrupts - * any statement that is currently executing and ignores any subsequently queued statements. Following - * the reset, the current transaction will have been rolled back and any outstanding failures will - * have been acknowledged. - * - * @deprecated This method should not be used and violates the expected usage pattern of {@link Session} objects. - * They are expected to be not thread-safe and should not be shared between thread. However this method is only - * useful when {@link Session} object is passed to another monitoring thread that calls it when appropriate. - * It is not useful when {@link Session} is used in a single thread because in this case {@link #close()} - * can be used. Since version 3.1, Neo4j database allows users to specify maximum transaction execution time and - * contains procedures to list and terminate running queries. These functions should be used instead of calling - * this method. - */ - @Deprecated - void reset(); - - /** - * Signal that you are done using this session. In the default driver usage, closing and accessing sessions is - * very low cost. - */ - @Override - void close(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/SessionParametersTemplate.java b/src/graiph-driver/java/org/neo4j/driver/SessionParametersTemplate.java deleted file mode 100644 index 0cd0108f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/SessionParametersTemplate.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.List; - -/** - * The template used to configure session parameters which will be used to create a session. - */ -public interface SessionParametersTemplate -{ - /** - * Set the initial bookmarks to be used in a session. - * First transaction in a session will ensure that server hosting is at least as up-to-date as the - * latest transaction referenced by the supplied bookmarks. - * - * @param bookmarks a series of initial bookmarks. Both {@code null} value and empty array - * are permitted, and indicate that the bookmarks do not exist or are unknown. - * @return this builder. - */ - SessionParametersTemplate withBookmarks( String... 
bookmarks ); - - /** - * Set the initial bookmarks to be used in a session. - * First transaction in a session will ensure that server hosting is at least as up-to-date as the - * latest transaction referenced by the supplied bookmarks. - * - * @param bookmarks initial references to some previous transactions. Both {@code null} value and empty iterable - * are permitted, and indicate that the bookmarks do not exist or are unknown. - * @return this builder - */ - SessionParametersTemplate withBookmarks( List bookmarks ); - - /** - * Set the type of access required by units of work in this session, - * e.g. {@link AccessMode#READ read access} or {@link AccessMode#WRITE write access}. - * - * @param mode access mode. - * @return this builder. - */ - SessionParametersTemplate withDefaultAccessMode( AccessMode mode ); - - /** - * Set the database that the newly created session is going to connect to. - * The given database name cannot be null. - * If the database name is not set, then the default database configured on the server configuration will be connected when the session established. - * - * @param database the database the session going to connect to. - * @return this builder. - */ - SessionParametersTemplate withDatabase( String database ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Statement.java b/src/graiph-driver/java/org/neo4j/driver/Statement.java deleted file mode 100644 index fea533f6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Statement.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.Map; - -import org.neo4j.driver.internal.value.MapValue; -import org.neo4j.driver.summary.ResultSummary; -import org.neo4j.driver.util.Immutable; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.util.Iterables.newHashMapWithSize; -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; -import static org.neo4j.driver.Values.ofValue; -import static org.neo4j.driver.Values.value; - -/** - * An executable statement, i.e. the statements' text and its parameters. - * - * @see Session - * @see Transaction - * @see StatementResult - * @see StatementResult#consume() - * @see ResultSummary - * @since 1.0 - */ -@Immutable -public class Statement -{ - private final String text; - private final Value parameters; - - /** - * Create a new statement. - * @param text the statement text - * @param parameters the statement parameters - */ - public Statement( String text, Value parameters ) - { - this.text = validateQuery( text ); - if( parameters == null ) - { - this.parameters = Values.EmptyMap; - } - else if ( parameters instanceof MapValue ) - { - this.parameters = parameters; - } - else - { - throw new IllegalArgumentException( "The parameters should be provided as Map type. 
Unsupported parameters type: " + parameters.type().name() ); - } - } - - /** - * Create a new statement. - * @param text the statement text - * @param parameters the statement parameters - */ - public Statement( String text, Map parameters ) - { - this( text, Values.value( parameters ) ); - } - - /** - * Create a new statement. - * @param text the statement text - */ - public Statement( String text ) - { - this( text, Values.EmptyMap ); - } - - /** - * @return the statement's text - */ - public String text() - { - return text; - } - - /** - * @return the statement's parameters - */ - public Value parameters() - { - return parameters; - } - - /** - * @param newText the new statement's text - * @return a new statement with updated text - */ - public Statement withText( String newText ) - { - return new Statement( newText, parameters ); - } - - /** - * @param newParameters the new statement's parameters - * @return a new statement with updated parameters - */ - public Statement withParameters( Value newParameters ) - { - return new Statement( text, newParameters ); - } - - /** - * @param newParameters the new statement's parameters - * @return a new statement with updated parameters - */ - public Statement withParameters( Map newParameters ) - { - return new Statement( text, newParameters ); - } - - /** - * Create a new statement with new parameters derived by updating this' - * statement's parameters using the given updates. - * - * Every update key that points to a null value will be removed from - * the new statement's parameters. All other entries will just replace - * any existing parameter in the new statement. - * - * @param updates describing how to update the parameters - * @return a new statement with updated parameters - */ - public Statement withUpdatedParameters( Value updates ) - { - if ( updates == null || updates.isEmpty() ) - { - return this; - } - else - { - Map newParameters = newHashMapWithSize( Math.max( parameters.size(), updates.size() ) ); - newParameters.putAll( parameters.asMap( ofValue() ) ); - for ( Map.Entry entry : updates.asMap( ofValue() ).entrySet() ) - { - Value value = entry.getValue(); - if ( value.isNull() ) - { - newParameters.remove( entry.getKey() ); - } - else - { - newParameters.put( entry.getKey(), value ); - } - } - return withParameters( value(newParameters) ); - } - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - Statement statement = (Statement) o; - return text.equals( statement.text ) && parameters.equals( statement.parameters ); - - } - - @Override - public int hashCode() - { - int result = text.hashCode(); - result = 31 * result + parameters.hashCode(); - return result; - } - - @Override - public String toString() - { - return format( "Statement{text='%s', parameters=%s}", text, parameters ); - } - - private static String validateQuery( String query ) - { - checkArgument( query != null, "Cypher query should not be null" ); - checkArgument( !query.isEmpty(), "Cypher query should not be an empty string" ); - return query; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/StatementResult.java b/src/graiph-driver/java/org/neo4j/driver/StatementResult.java deleted file mode 100644 index 912f2d1c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/StatementResult.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of 
Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.Iterator; -import java.util.List; -import java.util.stream.Stream; - -import org.neo4j.driver.exceptions.NoSuchRecordException; -import org.neo4j.driver.summary.ResultSummary; -import java.util.function.Function; -import org.neo4j.driver.util.Resource; - - -/** - * The result of running a Cypher statement, conceptually a stream of {@link Record records}. - * - * The standard way of navigating through the result returned by the database is to - * {@link #next() iterate} over it. - * - * Results are valid until the next statement is run or until the end of the current transaction, - * whichever comes first. To keep a result around while further statements are run, or to use a result outside the scope - * of the current transaction, see {@link #list()}. - * - *
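A sketch of keeping results around via list(), optionally projecting each record with the Records.column helper that appears earlier in this diff (assumes an open session):

    // Materialise the whole stream so it can outlive the current transaction.
    List<Record> people = session.run( "MATCH (p:Person) RETURN p.name AS name" ).list();

    // Or keep a single column only.
    List<Value> names = session.run( "MATCH (p:Person) RETURN p.name AS name" )
                               .list( Records.column( "name" ) );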

Important note on semantics

- * - * In order to handle very large results, and to minimize memory overhead and maximize - * performance, results are retrieved lazily. Please see {@link StatementRunner} for - * important details on the effects of this. - * - * The short version is that, if you want a hard guarantee that the underlying statement - * has completed, you need to either call {@link Resource#close()} on the {@link Transaction} - * or {@link Session} that created this result, or you need to use the result. - * - * Calling any method on this interface will guarantee that any write operation has completed on - * the remote database. - * - * @since 1.0 - */ -public interface StatementResult extends Iterator -{ - /** - * Retrieve the keys of the records this result contains. - * - * @return all keys - */ - List keys(); - - /** - * Test if there is another record we can navigate to in this result. - * @return true if {@link #next()} will return another record - */ - @Override boolean hasNext(); - - /** - * Navigate to and retrieve the next {@link Record} in this result. - * - * @throws NoSuchRecordException if there is no record left in the stream - * @return the next record - */ - @Override Record next(); - - /** - * Return the first record in the result, failing if there is not exactly - * one record left in the stream - * - * Calling this method always exhausts the result, even when {@link NoSuchRecordException} is thrown. - * - * @return the first and only record in the stream - * @throws NoSuchRecordException if there is not exactly one record left in the stream - */ - Record single() throws NoSuchRecordException; - - /** - * Investigate the next upcoming record without moving forward in the result. - * - * @throws NoSuchRecordException if there is no record left in the stream - * @return the next record - */ - Record peek(); - - /** - * Convert this result to a sequential {@link Stream} of records. - *

- * Result is exhausted when a terminal operation on the returned stream is executed. - * - * @return sequential {@link Stream} of records. Empty stream if this result has already been consumed or is empty. - */ - Stream stream(); - - /** - * Retrieve and store the entire result stream. - * This can be used if you want to iterate over the stream multiple times or to store the - * whole result for later use. - * - * Note that this method can only be used if you know that the statement that - * yielded this result returns a finite stream. Some statements can yield - * infinite results, in which case calling this method will lead to running - * out of memory. - * - * Calling this method exhausts the result. - * - * @return list of all remaining immutable records - */ - List list(); - - /** - * Retrieve and store a projection of the entire result. - * This can be used if you want to iterate over the stream multiple times or to store the - * whole result for later use. - * - * Note that this method can only be used if you know that the statement that - * yielded this result returns a finite stream. Some statements can yield - * infinite results, in which case calling this method will lead to running - * out of memory. - * - * Calling this method exhausts the result. - * - * @param mapFunction a function to map from Record to T. See {@link Records} for some predefined functions. - * @param the type of result list elements - * @return list of all mapped remaining immutable records - */ - List list( Function mapFunction ); - - /** - * Consume the entire result, yielding a summary of it. - * - * Calling this method exhausts the result. - * - *

-     * {@code
-     * ResultSummary summary = session.run( "PROFILE MATCH (n:User {id: 12345}) RETURN n" ).consume();
-     * }
-     * 
- * - * @return a summary for the whole query result - */ - ResultSummary consume(); - - /** - * Return the result summary. - * - * If the records in the result is not fully consumed, then calling this method will force to pull all remaining - * records into buffer to yield the summary. - * - * If you want to obtain the summary but discard the records, use - * {@link StatementResult#consume()} instead. - * - * @return a summary for the whole query result. - */ - ResultSummary summary(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/StatementRunner.java b/src/graiph-driver/java/org/neo4j/driver/StatementRunner.java deleted file mode 100644 index b643ae34..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/StatementRunner.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.util.Map; - -import org.neo4j.driver.types.TypeSystem; -import org.neo4j.driver.util.Experimental; - -/** - * Common interface for components that can execute Neo4j statements. - * - *

Important notes on semantics

- *

- * Statements run in the same {@link StatementRunner} are guaranteed - * to execute in order, meaning changes made by one statement will be seen - * by all subsequent statements in the same {@link StatementRunner}. - *

- * However, to allow handling very large results, and to improve performance, - * result streams are retrieved lazily from the network. - * This means that when any of {@link #run(Statement)} - * methods return a result, the statement has only started executing - it may not - * have completed yet. Most of the time, you will not notice this, because the - * driver automatically waits for statements to complete at specific points to - * fulfill its contracts. - *

- * Specifically, the driver will ensure all outstanding statements are completed - * whenever you: - * - *

    - *
  • Read from or discard a result, for instance via - * {@link StatementResult#next()} or {@link StatementResult#consume()}
  • Explicitly commit/rollback a transaction using blocking {@link Transaction#close()}
  • Close a session using blocking {@link Session#close()}
- *

- * As noted, most of the time, you will not need to consider this - your writes will - * always be durably stored as long as you either use the results, explicitly commit - * {@link Transaction transactions} or close the session you used using {@link Session#close()}. - *

- * While these semantics introduce some complexity, it gives the driver the ability - * to handle infinite result streams (like subscribing to events), significantly lowers - * the memory overhead for your application and improves performance. - * - * @see Session - * @see Transaction - * @since 1.0 - */ -public interface StatementRunner -{ - /** - * Run a statement and return a result stream. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This particular method takes a {@link Value} as its input. This is useful - * if you want to take a map-like value that you've gotten from a prior result - * and send it back as parameters. - *
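For example (a sketch, assuming an open session), a map-like Value built with Values.parameters, or taken from an earlier Record, can be passed straight back in:

    // The whole parameter map is carried by a single Value.
    Value params = Values.parameters( "myNameParam", "Bob" );
    StatementResult cursor = session.run( "MATCH (n) WHERE n.name = $myNameParam RETURN n", params );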

- * If you are creating parameters programmatically, {@link #run(String, Map)} - * might be more helpful, it converts your map to a {@link Value} for you. - * - *

Example

- *
-     * {@code
-     *
-     * StatementResult cursor = session.run( "MATCH (n) WHERE n.name = {myNameParam} RETURN (n)",
-     *                                       Values.parameters( "myNameParam", "Bob" ) );
-     * }
-     * 
- * - * @param statementTemplate text of a Neo4j statement - * @param parameters input parameters, should be a map Value, see {@link Values#parameters(Object...)}. - * @return a stream of result values and associated metadata - */ - StatementResult run( String statementTemplate, Value parameters ); - - /** - * Run a statement and return a result stream. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This version of run takes a {@link Map} of parameters. The values in the map - * must be values that can be converted to Neo4j types. See {@link Values#parameters(Object...)} for - * a list of allowed types. - * - *

Example

- *
-     * {@code
-     *
-     * Map<String, Object> parameters = new HashMap<>();
-     * parameters.put("myNameParam", "Bob");
-     *
-     * StatementResult cursor = session.run( "MATCH (n) WHERE n.name = {myNameParam} RETURN (n)",
-     *                                       parameters );
-     * }
-     * 
- * - * @param statementTemplate text of a Neo4j statement - * @param statementParameters input data for the statement - * @return a stream of result values and associated metadata - */ - StatementResult run( String statementTemplate, Map statementParameters ); - - /** - * Run a statement and return a result stream. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This version of run takes a {@link Record} of parameters, which can be useful - * if you want to use the output of one statement as input for another. - * - * @param statementTemplate text of a Neo4j statement - * @param statementParameters input data for the statement - * @return a stream of result values and associated metadata - */ - StatementResult run( String statementTemplate, Record statementParameters ); - - /** - * Run a statement and return a result stream. - * - * @param statementTemplate text of a Neo4j statement - * @return a stream of result values and associated metadata - */ - StatementResult run( String statementTemplate ); - - /** - * Run a statement and return a result stream. - *

Example

- *
-     * {@code
-     *
-     * Statement statement = new Statement( "MATCH (n) WHERE n.name=$myNameParam RETURN n.age" );
-     * StatementResult cursor = session.run( statement.withParameters( Values.parameters( "myNameParam", "Bob" )  ) );
-     * }
-     * 
- * - * @param statement a Neo4j statement - * @return a stream of result values and associated metadata - */ - StatementResult run( Statement statement ); - - /** - * @return type system used by this statement runner for classifying values - */ - @Experimental - TypeSystem typeSystem(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/Transaction.java b/src/graiph-driver/java/org/neo4j/driver/Transaction.java deleted file mode 100644 index c2b279ea..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/Transaction.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import org.neo4j.driver.util.Resource; - -/** - * Logical container for an atomic unit of work. - * A driver Transaction object corresponds to a server transaction. - *

- * Transactions are typically wrapped in a try-with-resources block - * which ensures that COMMIT or ROLLBACK - * occurs correctly on close. Note that ROLLBACK is the - * default action unless {@link #success()} is called before closing. - *

- * {@code
- * try(Transaction tx = session.beginTransaction())
- * {
- *     tx.run("CREATE (a:Person {name: {x}})", parameters("x", "Alice"));
- *     tx.success();
- * }
- * }
- * 
- * Blocking calls are: {@link #success()}, {@link #failure()}, {@link #close()} - * and various overloads of {@link #run(Statement)}. - * - * @see Session#run - * @see StatementRunner - * @since 1.0 - */ -public interface Transaction extends Resource, StatementRunner -{ - /** - * Mark this transaction as successful. You must call this method before calling {@link #close()} to have your - * transaction committed. - */ - void success(); - - /** - * Mark this transaction as failed. When you call {@link #close()}, the transaction will value rolled back. - * - * After this method has been called, there is nothing that can be done to "un-mark" it. This is a safety feature - * to make sure no other code calls {@link #success()} and makes a transaction commit that was meant to be rolled - * back. - * - * Example: - * - *
-     * {@code
-     * try(Transaction tx = session.beginTransaction() )
-     * {
-     *     tx.run("CREATE (a:Person {name: {x}})", parameters("x", "Alice"));
-     *     tx.failure();
-     * }
-     * }
-     * 
- */ - void failure(); - - /** - * Closing the transaction will complete it - it will commit if {@link #success()} has been called. - * When this method returns, all outstanding statements in the transaction are guaranteed to - * have completed, meaning any writes you performed are guaranteed to be durably stored. - */ - @Override - void close(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/TransactionConfig.java b/src/graiph-driver/java/org/neo4j/driver/TransactionConfig.java deleted file mode 100644 index 1b857633..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/TransactionConfig.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -import java.time.Duration; -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.async.AsyncSession; -import org.neo4j.driver.async.AsyncTransactionWork; -import org.neo4j.driver.internal.util.Extract; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.unmodifiableMap; -import static java.util.Objects.requireNonNull; -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -/** - * Configuration object containing settings for explicit and auto-commit transactions. - * Instances are immutable and can be reused for multiple transactions. - *

- * Configuration is supported for: - *

    - *
  • queries executed in auto-commit transactions - using various overloads of {@link Session#run(String, TransactionConfig)} and - * {@link AsyncSession#runAsync(String, TransactionConfig)}
  • transactions started by transaction functions - using {@link Session#readTransaction(TransactionWork, TransactionConfig)}, - * {@link Session#writeTransaction(TransactionWork, TransactionConfig)}, {@link AsyncSession#readTransactionAsync(AsyncTransactionWork, TransactionConfig)} and - * {@link AsyncSession#writeTransactionAsync(AsyncTransactionWork, TransactionConfig)}
  • explicit transactions - using {@link Session#beginTransaction(TransactionConfig)} and {@link AsyncSession#beginTransactionAsync(TransactionConfig)}
- *

- * Creation of configuration objects can be done using the builder API: - *

- * {@code
- * Map<String, Object> metadata = new HashMap<>();
- * metadata.put("type", "update user");
- * metadata.put("application", "my application");
- *
- * TransactionConfig config = TransactionConfig.builder()
- *                 .withTimeout(Duration.ofSeconds(4))
- *                 .withMetadata(metadata)
- *                 .build();
- * }
- * 
- * - * @see Session - */ -public class TransactionConfig -{ - private static final TransactionConfig EMPTY = builder().build(); - - private final Duration timeout; - private final Map metadata; - - private TransactionConfig( Builder builder ) - { - this.timeout = builder.timeout; - this.metadata = unmodifiableMap( builder.metadata ); - } - - /** - * Get a configuration object that does not have any values configures. - * - * @return an empty configuration object. - */ - public static TransactionConfig empty() - { - return EMPTY; - } - - /** - * Create new {@link Builder} used to construct a configuration object. - * - * @return new builder. - */ - public static Builder builder() - { - return new Builder(); - } - - /** - * Get the configured transaction timeout. - * - * @return timeout or {@code null} when it is not configured. - */ - public Duration timeout() - { - return timeout; - } - - /** - * Get the configured transaction metadata. - * - * @return metadata or empty map when it is not configured. - */ - public Map metadata() - { - return metadata; - } - - /** - * Check if this configuration object contains any values. - * - * @return {@code true} when no values are configured, {@code false otherwise}. - */ - public boolean isEmpty() - { - return timeout == null && (metadata == null || metadata.isEmpty()); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - TransactionConfig that = (TransactionConfig) o; - return Objects.equals( timeout, that.timeout ) && - Objects.equals( metadata, that.metadata ); - } - - @Override - public int hashCode() - { - return Objects.hash( timeout, metadata ); - } - - @Override - public String toString() - { - return "TransactionConfig{" + - "timeout=" + timeout + - ", metadata=" + metadata + - '}'; - } - - /** - * Builder used to construct {@link TransactionConfig transaction configuration} objects. - */ - public static class Builder - { - private Duration timeout; - private Map metadata = emptyMap(); - - private Builder() - { - } - - /** - * Set the transaction timeout. Transactions that execute longer than the configured timeout will be terminated by the database. - *

- * This functionality allows to limit query/transaction execution time. Specified timeout overrides the default timeout configured in the database - * using {@code dbms.transaction.timeout} setting. - *

- * Provided value should not be {@code null} and should not represent a duration of zero or negative duration. - * - * @param timeout the timeout. - * @return this builder. - */ - public Builder withTimeout( Duration timeout ) - { - requireNonNull( timeout, "Transaction timeout should not be null" ); - checkArgument( !timeout.isZero(), "Transaction timeout should not be zero" ); - checkArgument( !timeout.isNegative(), "Transaction timeout should not be negative" ); - - this.timeout = timeout; - return this; - } - - /** - * Set the transaction metadata. Specified metadata will be attached to the executing transaction and visible in the output of - * {@code dbms.listQueries} and {@code dbms.listTransactions} procedures. It will also get logged to the {@code query.log}. - *

- * This functionality makes it easier to tag transactions and is equivalent to {@code dbms.setTXMetaData} procedure. - *

- * Provided value should not be {@code null}. - * - * @param metadata the metadata. - * @return this builder. - */ - public Builder withMetadata( Map metadata ) - { - requireNonNull( metadata, "Transaction metadata should not be null" ); - - this.metadata = Extract.mapOfValues( metadata ); - return this; - } - - /** - * Build the transaction configuration object using the specified settings. - * - * @return new transaction configuration object. - */ - public TransactionConfig build() - { - return new TransactionConfig( this ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/TransactionWork.java b/src/graiph-driver/java/org/neo4j/driver/TransactionWork.java deleted file mode 100644 index 4e1fe97e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/TransactionWork.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver; - -/** - * Callback that executes operations against a given {@link Transaction}. - * To be used with {@link Session#readTransaction(TransactionWork)} and - * {@link Session#writeTransaction(TransactionWork)} methods. - * - * @param the return type of this work. - */ -public interface TransactionWork -{ - /** - * Executes all given operations against the same transaction. - * - * @param tx the transaction to use. - * @return some result object or {@code null} if none. - */ - T execute( Transaction tx ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/async/AsyncSession.java b/src/graiph-driver/java/org/neo4j/driver/async/AsyncSession.java deleted file mode 100644 index 7c70e0e7..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/async/AsyncSession.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.async; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.Executor; -import java.util.function.Function; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Transaction; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.Values; - -/** - * Provides a context of work for database interactions. - *

- * A AsyncSession hosts a series of {@linkplain AsyncTransaction transactions} - * carried out against a database. Within the database, all statements are - * carried out within a transaction. Within application code, however, it is - * not always necessary to explicitly {@link #beginTransactionAsync() begin a - * transaction}. If a statement is {@link #runAsync} directly against a {@link - * AsyncSession}, the server will automatically BEGIN and - * COMMIT that statement within its own transaction. This type - * of transaction is known as an autocommit transaction. - *
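A sketch of the asynchronous flavour, assuming an open AsyncSession named session, the plain runAsync(String) overload from AsyncStatementRunner, and a listAsync() accessor on the returned cursor (the cursor type is not part of this diff); nothing in the chain blocks:

    // Auto-commit, asynchronous: chain stages instead of blocking on the result.
    CompletionStage<List<Record>> recordsStage =
            session.runAsync( "MATCH (p:Person) RETURN p.name AS name" )
                   .thenCompose( cursor -> cursor.listAsync() );   // listAsync() is an assumption
    recordsStage.thenAccept( records ->
            records.forEach( r -> System.out.println( r.get( "name" ).asString() ) ) );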

- * Explicit transactions allow multiple statements to be committed as part of - * a single atomic operation and can be rolled back if necessary. They can also - * be used to ensure causal consistency, meaning that an application - * can run a series of queries on different members of a cluster, while - * ensuring that each query sees the state of graph at least as up-to-date as - * the graph seen by the previous query. For more on causal consistency, see - * the Neo4j clustering manual. - *

- * Typically, a session will acquire a TCP connection to execute query or - * transaction. Such a connection will be acquired from a connection pool - * and released back there when query result is consumed or transaction is - * committed or rolled back. One connection can therefore be adopted by many - * sessions, although by only one at a time. Application code should never need - * to deal directly with connection management. - *

- * A session inherits its destination address and permissions from its - * underlying connection. This means that for a single query/transaction one - * session may only ever target one machine within a cluster and does not - * support re-authentication. To achieve otherwise requires creation of a - * separate session. - *

- * Similarly, multiple sessions should be used when working with concurrency; - * session implementations are not thread safe. - * - * @since 2.0 - */ -public interface AsyncSession extends AsyncStatementRunner -{ - /** - * Begin a new explicit {@linkplain Transaction transaction}. At - * most one transaction may exist in a session at any point in time. To - * maintain multiple concurrent transactions, use multiple concurrent - * sessions. - *

- * This operation is asynchronous and returns a {@link CompletionStage}. This stage is completed with a new - * {@link Transaction} object when begin operation is successful. - * It is completed exceptionally if transaction can't be started. - *

- * Returned stage can be completed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. - * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned stage. - * Consider using asynchronous calls throughout the chain or offloading blocking operation to a different {@link Executor}. - * This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @return a {@link CompletionStage completion stage} that represents the asynchronous begin of a transaction. - */ - CompletionStage beginTransactionAsync(); - - /** - * Begin a new explicit {@linkplain AsyncTransaction transaction} with the specified {@link TransactionConfig configuration}. - * At most one transaction may exist in a session at any point in time. - * To maintain multiple concurrent transactions, use multiple concurrent sessions. - *

- * This operation is asynchronous and returns a {@link CompletionStage}. This stage is completed with a new - * {@link AsyncTransaction} object when begin operation is successful. It is completed exceptionally if - * transaction can't be started. - *

- * Returned stage can be completed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. - * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned stage. - * Consider using asynchronous calls throughout the chain or offloading blocking operation to a different {@link Executor}. - * This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @param config configuration for the new transaction. - * @return a {@link CompletionStage completion stage} that represents the asynchronous begin of a transaction. - */ - CompletionStage beginTransactionAsync( TransactionConfig config ); - - /** - * Execute given unit of asynchronous work in a {@link AccessMode#READ read} asynchronous transaction. - *

- * Transaction will automatically be committed unless given unit of work fails or - * {@link AsyncTransaction#commitAsync() async transaction commit} fails. - * It will also not be committed if explicitly rolled back via {@link AsyncTransaction#rollbackAsync()}. - *
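A hedged sketch of an asynchronous read transaction function; the unit of work must itself return a CompletionStage, and the cursor's singleAsync() accessor used here is an assumption (its interface is not part of this diff):

    // The async unit of work returns a CompletionStage; the transaction commits when it completes normally.
    CompletionStage<Long> countStage = session.readTransactionAsync( tx ->
            tx.runAsync( "MATCH (p:Person) RETURN count(p)" )
              .thenCompose( cursor -> cursor.singleAsync() )
              .thenApply( record -> record.get( 0 ).asLong() ) );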

- * Returned stage and given {@link AsyncTransactionWork} can be completed/executed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. - * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned stage and do not use them inside the - * {@link AsyncTransactionWork}. - * Consider using asynchronous calls throughout the chain or offloading blocking operation to a different {@link Executor}. - * This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @param work the {@link AsyncTransactionWork} to be applied to a new read transaction. Operation executed by the - * given work must be asynchronous. - * @param the return type of the given unit of work. - * @return a {@link CompletionStage completion stage} completed with the same result as returned by the given - * unit of work. Stage can be completed exceptionally if given work or commit fails. - */ - CompletionStage readTransactionAsync( AsyncTransactionWork> work ); - - /** - * Execute given unit of asynchronous work in a {@link AccessMode#READ read} asynchronous transaction with - * the specified {@link TransactionConfig configuration}. - *

- * Transaction will automatically be committed unless given unit of work fails or - * {@link AsyncTransaction#commitAsync() async transaction commit} fails. - * It will also not be committed if explicitly rolled back via {@link AsyncTransaction#rollbackAsync()}. - *

- * Returned stage and given {@link AsyncTransactionWork} can be completed/executed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. - * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned stage and do not use them inside the - * {@link AsyncTransactionWork}. - * Consider using asynchronous calls throughout the chain or offloading blocking operation to a different {@link Executor}. - * This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @param work the {@link AsyncTransactionWork} to be applied to a new read transaction. Operation executed by the - * given work must be asynchronous. - * @param config configuration for all transactions started to execute the unit of work. - * @param the return type of the given unit of work. - * @return a {@link CompletionStage completion stage} completed with the same result as returned by the given - * unit of work. Stage can be completed exceptionally if given work or commit fails. - */ - CompletionStage readTransactionAsync( AsyncTransactionWork> work, TransactionConfig config ); - - /** - * Execute given unit of asynchronous work in a {@link AccessMode#WRITE write} asynchronous transaction. - *

- * Transaction will automatically be committed unless given unit of work fails or - * {@link AsyncTransaction#commitAsync() async transaction commit} fails. It will also not be committed if explicitly - * rolled back via {@link AsyncTransaction#rollbackAsync()}. - *

- * Returned stage and given {@link AsyncTransactionWork} can be completed/executed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. - * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned stage and do not use them inside the - * {@link AsyncTransactionWork}. - * Consider using asynchronous calls throughout the chain or offloading blocking operation to a different {@link Executor}. - * This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @param work the {@link AsyncTransactionWork} to be applied to a new write transaction. Operation executed by the - * given work must be asynchronous. - * @param the return type of the given unit of work. - * @return a {@link CompletionStage completion stage} completed with the same result as returned by the given - * unit of work. Stage can be completed exceptionally if given work or commit fails. - */ - CompletionStage writeTransactionAsync( AsyncTransactionWork> work ); - - /** - * Execute given unit of asynchronous work in a {@link AccessMode#WRITE write} asynchronous transaction with - * the specified {@link TransactionConfig configuration}. - *

- * Transaction will automatically be committed unless given unit of work fails or - * {@link AsyncTransaction#commitAsync() async transaction commit} fails. It will also not be committed if explicitly - * rolled back via {@link AsyncTransaction#rollbackAsync()}. - *

- * Returned stage and given {@link AsyncTransactionWork} can be completed/executed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. - * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned stage and do not use them inside the - * {@link AsyncTransactionWork}. - * Consider using asynchronous calls throughout the chain or offloading blocking operation to a different {@link Executor}. - * This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @param work the {@link AsyncTransactionWork} to be applied to a new write transaction. Operation executed by the - * given work must be asynchronous. - * @param config configuration for all transactions started to execute the unit of work. - * @param the return type of the given unit of work. - * @return a {@link CompletionStage completion stage} completed with the same result as returned by the given - * unit of work. Stage can be completed exceptionally if given work or commit fails. - */ - CompletionStage writeTransactionAsync( AsyncTransactionWork> work, TransactionConfig config ); - - /** - * Run a statement asynchronously in an auto-commit transaction with the specified {@link TransactionConfig configuration} and return a - * {@link CompletionStage} with a result cursor. - *

- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc in {@link AsyncStatementRunner} for - * more information. - * - * @param statement text of a Neo4j statement. - * @param config configuration for the new transaction. - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( String statement, TransactionConfig config ); - - /** - * Run a statement asynchronously in an auto-commit transaction with the specified {@link TransactionConfig configuration} and return a - * {@link CompletionStage} with a result cursor. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This version of runAsync takes a {@link Map} of parameters. The values in the map - * must be values that can be converted to Neo4j types. See {@link Values#parameters(Object...)} for - * a list of allowed types. - *

- * <h2>Example</h2>

- *
-     * {@code
-     * Map<String, Object> metadata = new HashMap<>();
-     * metadata.put("type", "update name");
-     *
-     * TransactionConfig config = TransactionConfig.builder()
-     *                 .withTimeout(Duration.ofSeconds(3))
-     *                 .withMetadata(metadata)
-     *                 .build();
-     *
-     * Map<String, Object> parameters = new HashMap<>();
-     * parameters.put("myNameParam", "Bob");
-     *
-     * CompletionStage<StatementResultCursor> cursorStage = session.runAsync(
-     *             "MATCH (n) WHERE n.name = {myNameParam} RETURN (n)",
-     *             parameters,
-     *             config);
-     * }
-     * 
- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc in {@link AsyncStatementRunner} for - * more information. - * - * @param statement text of a Neo4j statement. - * @param parameters input data for the statement. - * @param config configuration for the new transaction. - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( String statement, Map parameters, TransactionConfig config ); - - /** - * Run a statement asynchronously in an auto-commit transaction with the specified {@link TransactionConfig configuration} and return a - * {@link CompletionStage} with a result cursor. - *

- * <h2>Example</h2>

- *
-     * {@code
-     * Map<String, Object> metadata = new HashMap<>();
-     * metadata.put("type", "update name");
-     *
-     * TransactionConfig config = TransactionConfig.builder()
-     *                 .withTimeout(Duration.ofSeconds(3))
-     *                 .withMetadata(metadata)
-     *                 .build();
-     *
-     * Statement statement = new Statement( "MATCH (n) WHERE n.name=$myNameParam RETURN n.age" );
-     * CompletionStage<StatementResultCursor> cursorStage = session.runAsync(statement, config);
-     * }
-     * 
- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc in {@link AsyncStatementRunner} for - * more information. - * - * @param statement a Neo4j statement. - * @param config configuration for the new transaction. - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( Statement statement, TransactionConfig config ); - - /** - * Return the bookmark received following the last completed - * {@linkplain Transaction transaction}. If no bookmark was received - * or if this transaction was rolled back, the bookmark value will - * be null. - * - * @return a reference to a previous transaction - */ - String lastBookmark(); - - /** - * Signal that you are done using this session. In the default driver usage, closing and accessing sessions is - * very low cost. - *

- * This operation is asynchronous and returns a {@link CompletionStage}. Stage is completed when all outstanding - * statements in the session have completed, meaning any writes you performed are guaranteed to be durably stored. - * It might be completed exceptionally when there are unconsumed errors from previous statements or transactions. - * - * @return a {@link CompletionStage completion stage} that represents the asynchronous close. - */ - CompletionStage closeAsync(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/async/AsyncStatementRunner.java b/src/graiph-driver/java/org/neo4j/driver/async/AsyncStatementRunner.java deleted file mode 100644 index cf6aa2c4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/async/AsyncStatementRunner.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.async; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.Executor; -import java.util.function.Function; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; - -/** - * Asynchronous interface for components that can execute Neo4j statements. - * - *

- * <h2>Important notes on semantics</h2>

- *

- * Statements run in the same {@link AsyncStatementRunner} are guaranteed - * to execute in order, meaning changes made by one statement will be seen - * by all subsequent statements in the same {@link AsyncStatementRunner}. - *

- * However, to allow handling very large results, and to improve performance, - * result streams are retrieved lazily from the network. This means that when - * async {@link #runAsync(Statement)} - * methods return a result, the statement has only started executing - it may not - * have completed yet. Most of the time, you will not notice this, because the - * driver automatically waits for statements to complete at specific points to - * fulfill its contracts. - *

- * Specifically, the driver will ensure all outstanding statements are completed - * whenever you: - * - *

- * <ul>
- * <li>Read from or discard a result, for instance via
- * {@link StatementResultCursor#nextAsync()}, {@link StatementResultCursor#consumeAsync()}</li>
- * <li>Explicitly commit/rollback a transaction using {@link AsyncTransaction#commitAsync()}, {@link AsyncTransaction#rollbackAsync()}</li>
- * <li>Close a session using {@link AsyncSession#closeAsync()}</li>
- * </ul>
- *

- * As noted, most of the time, you will not need to consider this - your writes will - * always be durably stored as long as you either use the results, explicitly commit - * {@link AsyncTransaction transactions} or close the session you used using {@link AsyncSession#closeAsync()}}. - *

- * While these semantics introduce some complexity, it gives the driver the ability - * to handle infinite result streams (like subscribing to events), significantly lowers - * the memory overhead for your application and improves performance. - * - *
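A small sketch of the durability point above, assuming an open AsyncSession (the method name is illustrative, not from the original sources):

import java.util.concurrent.CompletionStage;

import org.neo4j.driver.async.AsyncSession;
import org.neo4j.driver.summary.ResultSummary;

public class ConsumeBeforeTrustingWritesSketch
{
    // Sketch only: `session` is assumed to be an open AsyncSession.
    static CompletionStage<ResultSummary> markEveryoneSeen( AsyncSession session )
    {
        // runAsync only starts the statement; consuming the cursor (or committing the
        // transaction / closing the session) is what guarantees it has completed.
        return session.runAsync( "MATCH (n:Person) SET n.seen = true" )
                      .thenCompose( cursor -> cursor.consumeAsync() );
    }
}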

- * <h2>Asynchronous API</h2>

- *

- * All overloads of {@link #runAsync(Statement)} execute queries in async fashion and return {@link CompletionStage} of - * a new {@link StatementResultCursor}. Stage can be completed exceptionally when error happens, e.g. connection can't - * be acquired from the pool. - *
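A sketch of chaining work onto the returned stage without blocking, assuming an open AsyncSession and an application-owned worker pool (see also the note below):

import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

import org.neo4j.driver.async.AsyncSession;

public class NonBlockingChainSketch
{
    // Application-owned pool for post-processing; keeps heavier work off the driver's IO threads.
    private static final Executor WORKERS = Executors.newFixedThreadPool( 2 );

    // Sketch only: `session` is assumed to be an open AsyncSession.
    static CompletionStage<Long> countPeople( AsyncSession session )
    {
        // No get()/join() anywhere in the chain; the post-processing step uses
        // thenApplyAsync with an explicit executor instead of running on an IO thread.
        return session.runAsync( "MATCH (n:Person) RETURN count(n) AS c" )
                      .thenCompose( cursor -> cursor.singleAsync() )
                      .thenApplyAsync( record -> record.get( "c" ).asLong(), WORKERS );
    }
}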

- * Note: Returned stage can be completed by an IO thread which should never block. Otherwise IO operations on - * this and potentially other network connections might deadlock. Please do not chain blocking operations like - * {@link CompletableFuture#get()} on the returned stage. Consider using asynchronous calls throughout the chain or offloading blocking - * operation to a different {@link Executor}. This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @see AsyncSession - * @see AsyncTransaction - * @since 2.0 - */ -public interface AsyncStatementRunner -{ - /** - * Run a statement asynchronously and return a {@link CompletionStage} with a - * result cursor. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This particular method takes a {@link Value} as its input. This is useful - * if you want to take a map-like value that you've gotten from a prior result - * and send it back as parameters. - *

- * If you are creating parameters programmatically, {@link #runAsync(String, Map)} - * might be more helpful, it converts your map to a {@link Value} for you. - *

- * <h2>Example</h2>

- *
-     * {@code
-     *
-     * CompletionStage<StatementResultCursor> cursorStage = session.runAsync(
-     *             "MATCH (n) WHERE n.name = {myNameParam} RETURN (n)",
-     *             Values.parameters("myNameParam", "Bob"));
-     * }
-     * 
- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc for - * more information. - * - * @param statementTemplate text of a Neo4j statement - * @param parameters input parameters, should be a map Value, see {@link Values#parameters(Object...)}. - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( String statementTemplate, Value parameters ); - - /** - * Run a statement asynchronously and return a {@link CompletionStage} with a - * result cursor. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This version of runAsync takes a {@link Map} of parameters. The values in the map - * must be values that can be converted to Neo4j types. See {@link Values#parameters(Object...)} for - * a list of allowed types. - *

- * <h2>Example</h2>

- *
-     * {@code
-     *
-     * Map<String, Object> parameters = new HashMap<>();
-     * parameters.put("myNameParam", "Bob");
-     *
-     * CompletionStage<StatementResultCursor> cursorStage = session.runAsync(
-     *             "MATCH (n) WHERE n.name = {myNameParam} RETURN (n)",
-     *             parameters);
-     * }
-     * 
- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc for - * more information. - * - * @param statementTemplate text of a Neo4j statement - * @param statementParameters input data for the statement - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( String statementTemplate, Map statementParameters ); - - /** - * Run a statement asynchronously and return a {@link CompletionStage} with a - * result cursor. - *

- * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - *

- * This version of runAsync takes a {@link Record} of parameters, which can be useful - * if you want to use the output of one statement as input for another. - *

- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc for - * more information. - * - * @param statementTemplate text of a Neo4j statement - * @param statementParameters input data for the statement - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( String statementTemplate, Record statementParameters ); - - /** - * Run a statement asynchronously and return a {@link CompletionStage} with a - * result cursor. - *

- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc for - * more information. - * - * @param statementTemplate text of a Neo4j statement - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( String statementTemplate ); - - /** - * Run a statement asynchronously and return a {@link CompletionStage} with a - * result cursor. - *

- * <h2>Example</h2>

- *
-     * {@code
-     * Statement statement = new Statement( "MATCH (n) WHERE n.name=$myNameParam RETURN n.age" );
-     * CompletionStage<StatementResultCursor> cursorStage = session.runAsync(statement);
-     * }
-     * 
- * It is not allowed to chain blocking operations on the returned {@link CompletionStage}. See class javadoc for - * more information. - * - * @param statement a Neo4j statement - * @return new {@link CompletionStage} that gets completed with a result cursor when query execution is successful. - * Stage can be completed exceptionally when error happens, e.g. connection can't be acquired from the pool. - */ - CompletionStage runAsync( Statement statement ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/async/AsyncTransaction.java b/src/graiph-driver/java/org/neo4j/driver/async/AsyncTransaction.java deleted file mode 100644 index 4daaa775..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/async/AsyncTransaction.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.async; - -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.Executor; -import java.util.function.Function; - -import org.neo4j.driver.Session; -import org.neo4j.driver.Statement; -import org.neo4j.driver.StatementRunner; - -/** - * Logical container for an atomic unit of work. - * A driver Transaction object corresponds to a server transaction. - *

- * Transactions are typically obtained in a {@link CompletionStage} and all - * operations chain on this stage. Explicit commit with {@link #commitAsync()} - * or rollback with {@link #rollbackAsync()} is required. Without explicit - * commit/rollback corresponding transaction will remain open in the database. - *

- * {@code
- * session.beginTransactionAsync()
- *        .thenCompose(tx ->
- *               tx.runAsync("CREATE (a:Person {name: {x}})", parameters("x", "Alice"))
- *                 .exceptionally(e -> {
- *                    e.printStackTrace();
- *                    return null;
- *                 })
- *                 .thenApply(ignore -> tx)
- *        ).thenCompose(AsyncTransaction::commitAsync);
- * }
- * 
- * Async calls are: {@link #commitAsync()}, {@link #rollbackAsync()} and various overloads of - * {@link #runAsync(Statement)}. - * - * @see Session#run - * @see StatementRunner - * @since 2.0 - */ -public interface AsyncTransaction extends AsyncStatementRunner -{ - /** - * Commit this transaction in asynchronous fashion. This operation is typically executed as part of the - * {@link CompletionStage} chain that starts with a transaction. - * There is no need to close transaction after calling this method. - * Transaction object should not be used after calling this method. - *

- * Returned stage can be completed by an IO thread which should never block. Otherwise IO operations on this and - * potentially other network connections might deadlock. Please do not chain blocking operations like - * {@link CompletableFuture#get()} on the returned stage. Consider using asynchronous calls throughout the chain or offloading blocking - * operation to a different {@link Executor}. This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @return new {@link CompletionStage} that gets completed with {@code null} when commit is successful. Stage can - * be completed exceptionally when commit fails. - */ - CompletionStage commitAsync(); - - /** - * Rollback this transaction in asynchronous fashion. This operation is typically executed as part of the - * {@link CompletionStage} chain that starts with a transaction. - * There is no need to close transaction after calling this method. - * Transaction object should not be used after calling this method. - *

- * Returned stage can be completed by an IO thread which should never block. Otherwise IO operations on this and - * potentially other network connections might deadlock. Please do not chain blocking operations like - * {@link CompletableFuture#get()} on the returned stage. Consider using asynchronous calls throughout the chain or offloading blocking - * operation to a different {@link Executor}. This can be done using methods with "Async" suffix like - * {@link CompletionStage#thenApplyAsync(Function)} or {@link CompletionStage#thenApplyAsync(Function, Executor)}. - * - * @return new {@link CompletionStage} that gets completed with {@code null} when rollback is successful. Stage can - * be completed exceptionally when rollback fails. - */ - CompletionStage rollbackAsync(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/async/AsyncTransactionWork.java b/src/graiph-driver/java/org/neo4j/driver/async/AsyncTransactionWork.java deleted file mode 100644 index 456c9b0a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/async/AsyncTransactionWork.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.async; - -/** - * Callback that executes operations against a given {@link AsyncTransaction}. - * To be used with {@link AsyncSession#readTransactionAsync(AsyncTransactionWork)} and - * {@link AsyncSession#writeTransactionAsync(AsyncTransactionWork)} (AsyncTransactionWork)} methods. - * - * @param the return type of this work. - * @since 2.0 - */ -public interface AsyncTransactionWork -{ - /** - * Executes all given operations against the same transaction. - * - * @param tx the transaction to use. - * @return some result object or {@code null} if none. - */ - T execute( AsyncTransaction tx ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/async/StatementResultCursor.java b/src/graiph-driver/java/org/neo4j/driver/async/StatementResultCursor.java deleted file mode 100644 index 0fca6067..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/async/StatementResultCursor.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.async; - -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.Executor; -import java.util.function.Consumer; -import java.util.function.Function; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Records; -import org.neo4j.driver.exceptions.NoSuchRecordException; -import org.neo4j.driver.summary.ResultSummary; - -/** - * The result of asynchronous execution of a Cypher statement, conceptually an asynchronous stream of - * {@link Record records}. - *

- * Result can be eagerly fetched in a list using {@link #listAsync()} or navigated lazily using - * {@link #forEachAsync(Consumer)} or {@link #nextAsync()}. - *

- * Results are valid until the next statement is run or until the end of the current transaction, - * whichever comes first. To keep a result around while further statements are run, or to use a result outside the scope - * of the current transaction, see {@link #listAsync()}. - *

- * <h2>Important note on semantics</h2>

- *

- * In order to handle very large results, and to minimize memory overhead and maximize - * performance, results are retrieved lazily. Please see {@link AsyncStatementRunner} for - * important details on the effects of this. - *

- * The short version is that, if you want a hard guarantee that the underlying statement - * has completed, you need to either call {@link AsyncTransaction#commitAsync()} on the {@link AsyncTransaction transaction} - * or {@link AsyncSession#closeAsync()} on the {@link AsyncSession session} that created this result, or you need to use - * the result. - *
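A brief sketch of consuming such a cursor, assuming an open AsyncSession and a query known to return a finite stream (the helper name is illustrative):

import java.util.List;
import java.util.concurrent.CompletionStage;

import org.neo4j.driver.async.AsyncSession;

public class CursorConsumptionSketch
{
    // Sketch only: `session` is assumed to be an open AsyncSession.
    static CompletionStage<List<String>> listNames( AsyncSession session )
    {
        // listAsync(mapFunction) buffers all remaining records, so it is only safe for statements
        // known to return a finite stream; it also exhausts the cursor, which provides the
        // completion guarantee described above.
        return session.runAsync( "MATCH (n:Person) RETURN n.name AS name" )
                      .thenCompose( cursor -> cursor.listAsync( record -> record.get( "name" ).asString() ) );
    }
}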

- * Note: Every returned {@link CompletionStage} can be completed by an IO thread which should never block. - * Otherwise IO operations on this and potentially other network connections might deadlock. Please do not chain - * blocking operations like {@link CompletableFuture#get()} on the returned stages. Consider using asynchronous calls - * throughout the chain or offloading blocking operation to a different {@link Executor}. This can be done using - * methods with "Async" suffix like {@link CompletionStage#thenApplyAsync(java.util.function.Function)} or - * {@link CompletionStage#thenApplyAsync(java.util.function.Function, Executor)}. - * - * @since 1.5 - */ -public interface StatementResultCursor -{ - /** - * Retrieve the keys of the records this result cursor contains. - * - * @return list of all keys. - */ - List keys(); - - /** - * Asynchronously retrieve the result summary. - *

- * If the records in the result is not fully consumed, then calling this method will force to pull all remaining - * records into buffer to yield the summary. - *

- * If you want to obtain the summary but discard the records, use {@link #consumeAsync()} instead. - * - * @return a {@link CompletionStage} completed with a summary for the whole query result. Stage can also be - * completed exceptionally if query execution fails. - */ - CompletionStage summaryAsync(); - - /** - * Asynchronously navigate to and retrieve the next {@link Record} in this result. Returned stage can contain - * {@code null} if end of records stream has been reached. - * - * @return a {@link CompletionStage} completed with a record or {@code null}. Stage can also be - * completed exceptionally if query execution fails. - */ - CompletionStage nextAsync(); - - /** - * Asynchronously investigate the next upcoming {@link Record} without moving forward in the result. Returned - * stage can contain {@code null} if end of records stream has been reached. - * - * @return a {@link CompletionStage} completed with a record or {@code null}. Stage can also be - * completed exceptionally if query execution fails. - */ - CompletionStage peekAsync(); - - /** - * Asynchronously return the first record in the result, failing if there is not exactly - * one record left in the stream. - * - * @return a {@link CompletionStage} completed with the first and only record in the stream. Stage will be - * completed exceptionally with {@link NoSuchRecordException} if there is not exactly one record left in the - * stream. It can also be completed exceptionally if query execution fails. - */ - CompletionStage singleAsync(); - - /** - * Asynchronously consume the entire result, yielding a summary of it. Calling this method exhausts the result. - * - * @return a {@link CompletionStage} completed with a summary for the whole query result. Stage can also be - * completed exceptionally if query execution fails. - */ - CompletionStage consumeAsync(); - - /** - * Asynchronously apply the given {@link Consumer action} to every record in the result, yielding a summary of it. - * - * @param action the function to be applied to every record in the result. Provided function should not block. - * @return a {@link CompletionStage} completed with a summary for the whole query result. Stage can also be - * completed exceptionally if query execution or provided function fails. - */ - CompletionStage forEachAsync( Consumer action ); - - /** - * Asynchronously retrieve and store the entire result stream. - * This can be used if you want to iterate over the stream multiple times or to store the - * whole result for later use. - *

- * Note that this method can only be used if you know that the statement that - * yielded this result returns a finite stream. Some statements can yield - * infinite results, in which case calling this method will lead to running - * out of memory. - *

- * Calling this method exhausts the result. - * - * @return a {@link CompletionStage} completed with a list of all remaining immutable records. Stage can also be - * completed exceptionally if query execution fails. - */ - CompletionStage> listAsync(); - - /** - * Asynchronously retrieve and store a projection of the entire result. - * This can be used if you want to iterate over the stream multiple times or to store the - * whole result for later use. - *

- * Note that this method can only be used if you know that the statement that - * yielded this result returns a finite stream. Some statements can yield - * infinite results, in which case calling this method will lead to running - * out of memory. - *

- * Calling this method exhausts the result. - * - * @param mapFunction a function to map from Record to T. See {@link Records} for some predefined functions. - * @param the type of result list elements - * @return a {@link CompletionStage} completed with a list of all remaining immutable records. Stage can also be - * completed exceptionally if query execution or provided function fails. - */ - CompletionStage> listAsync( Function mapFunction ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/AuthenticationException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/AuthenticationException.java deleted file mode 100644 index ad4a8b1a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/AuthenticationException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * Failed to authenticate the driver to the server due to bad credentials provided. - * When this error happens, the error could be recovered by closing the current driver and restart a new driver with - * the correct credentials. - * - * @since 1.1 - */ -public class AuthenticationException extends SecurityException -{ - public AuthenticationException( String code, String message ) - { - super( code, message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/ClientException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/ClientException.java deleted file mode 100644 index 1b10e49a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/ClientException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * A ClientException indicates that the client has carried out an operation incorrectly. - * The error code provided can be used to determine further detail for the problem. 
- * @since 1.0 - */ -public class ClientException extends Neo4jException -{ - public ClientException( String message ) - { - super( message ); - } - - public ClientException( String message, Throwable cause ) - { - super( message, cause ); - } - - public ClientException( String code, String message ) - { - super( code, message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/DatabaseException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/DatabaseException.java deleted file mode 100644 index 57a76a88..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/DatabaseException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * A DatabaseException indicates that there is a problem within the underlying database. - * The error code provided can be used to determine further detail for the problem. - * @since 1.0 - */ -public class DatabaseException extends Neo4jException -{ - public DatabaseException( String code, String message ) - { - super( code, message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/Neo4jException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/Neo4jException.java deleted file mode 100644 index 2415cf04..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/Neo4jException.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * This is the base class for all exceptions caused as part of communication with the remote Neo4j server. 
- * - * @since 1.0 - */ -public abstract class Neo4jException extends RuntimeException -{ - private static final long serialVersionUID = -80579062276712566L; - - private final String code; - - public Neo4jException( String message ) - { - this( "N/A", message ); - } - - public Neo4jException( String message, Throwable cause ) - { - this( "N/A", message, cause ); - } - - public Neo4jException( String code, String message ) - { - this( code, message, null ); - } - - public Neo4jException( String code, String message, Throwable cause ) - { - super( message, cause ); - this.code = code; - } - - /** - * Access the standard Neo4j Status Code for this exception, you can use this to refer to the Neo4j manual for - * details on what caused the error. - * - * @return the Neo4j Status Code for this exception, or 'N/A' if none is available - */ - @Deprecated - public String neo4jErrorCode() - { - return code; - } - - /** - * Access the status code for this exception. The Neo4j manual can - * provide further details on the available codes and their meanings. - * - * @return textual code, such as "Neo.ClientError.Procedure.ProcedureNotFound" - */ - public String code() - { - return code; - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/NoSuchRecordException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/NoSuchRecordException.java deleted file mode 100644 index c58539d0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/NoSuchRecordException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -import java.util.NoSuchElementException; - -/** - * Thrown whenever a client expected to read a record that was not available (i.e. because it wasn't returned by the server). - * - * This usually indicates an expectation mismatch between client code and database application logic. - * - * @since 1.0 - */ -public class NoSuchRecordException extends NoSuchElementException -{ - private static final long serialVersionUID = 9091962868264042491L; - - public NoSuchRecordException( String message ) - { - super( message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/ProtocolException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/ProtocolException.java deleted file mode 100644 index b77fcf42..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/ProtocolException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * A signal that the contract for client-server communication has broken down. - * The user should contact support and cannot resolve this his or herself. - */ -public class ProtocolException extends Neo4jException -{ - private static final String CODE = "Protocol violation: "; - - public ProtocolException( String message ) - { - super( CODE + message ); - } - - public ProtocolException( String message, Throwable e ) - { - super( CODE + message, e ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/SecurityException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/SecurityException.java deleted file mode 100644 index f9304e3f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/SecurityException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * Failed to communicate with the server due to security errors. - * When this type of error happens, the security cause of the error should be fixed to ensure the safety of your data. - * Restart of server/driver/cluster might be required to recover from this error. - * @since 1.1 - */ -public class SecurityException extends Neo4jException -{ - public SecurityException( String code, String message ) - { - super( code, message ); - } - - public SecurityException( String message, Throwable t ) - { - super( message, t ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/ServiceUnavailableException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/ServiceUnavailableException.java deleted file mode 100644 index 18fe6871..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/ServiceUnavailableException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * An ServiceUnavailableException indicates that the driver cannot communicate with the cluster. - * @since 1.1 - */ -public class ServiceUnavailableException extends Neo4jException -{ - public ServiceUnavailableException( String message ) - { - super( message ); - } - - public ServiceUnavailableException( String message, Throwable throwable ) - { - super( message, throwable); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/SessionExpiredException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/SessionExpiredException.java deleted file mode 100644 index 67ccd74f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/SessionExpiredException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * A SessionExpiredException indicates that the session can no longer satisfy the criteria under which it - * was acquired, e.g. a server no longer accepts write requests. A new session needs to be acquired from the driver - * and all actions taken on the expired session must be replayed. - * @since 1.1 - */ -public class SessionExpiredException extends Neo4jException -{ - public SessionExpiredException( String message) - { - super( message ); - } - - public SessionExpiredException( String message, Throwable throwable ) - { - super( message, throwable ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/TransientException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/TransientException.java deleted file mode 100644 index 1ef16f4d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/TransientException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions; - -/** - * A TransientException signals a temporary fault that may be worked around by retrying. - * The error code provided can be used to determine further detail for the problem. 
- * @since 1.0 - */ -public class TransientException extends Neo4jException -{ - public TransientException( String code, String message ) - { - super( code, message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/UntrustedServerException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/UntrustedServerException.java deleted file mode 100644 index f74db113..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/UntrustedServerException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.neo4j.driver.exceptions; - -/** - * Thrown if the remote server cannot be verified as Neo4j. - */ -public class UntrustedServerException extends RuntimeException -{ - public UntrustedServerException(String message) - { - super(message); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/LossyCoercion.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/value/LossyCoercion.java deleted file mode 100644 index 7d9445b5..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/LossyCoercion.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions.value; - -import static java.lang.String.format; - -/** - * A LossyCoercion exception indicates that the conversion cannot be achieved without losing precision. - * @since 1.0 - */ -public class LossyCoercion extends ValueException -{ - private static final long serialVersionUID = -6259981390929065201L; - - public LossyCoercion( String sourceTypeName, String destinationTypeName ) - { - super( format( "Cannot coerce %s to %s without losing precision", sourceTypeName, destinationTypeName ) ); - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/NotMultiValued.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/value/NotMultiValued.java deleted file mode 100644 index 67e8d4e2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/NotMultiValued.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions.value; - -/** - * A NotMultiValued exception indicates that the value does not consist of multiple values, a.k.a. not a map - * or array. - * @since 1.0 - */ -public class NotMultiValued extends ValueException -{ - private static final long serialVersionUID = -7380569883011364090L; - - public NotMultiValued( String message ) - { - super( message ); - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/Uncoercible.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/value/Uncoercible.java deleted file mode 100644 index ea87bd4f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/Uncoercible.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions.value; - -import static java.lang.String.format; - -/** - * A Uncoercible exception indicates that the conversion cannot be achieved. - * @since 1.0 - */ -public class Uncoercible extends ValueException -{ - private static final long serialVersionUID = -6259981390929065201L; - - public Uncoercible( String sourceTypeName, String destinationTypeName ) - { - super( format( "Cannot coerce %s to %s", sourceTypeName, destinationTypeName ) ); - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/Unsizable.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/value/Unsizable.java deleted file mode 100644 index b8d3bdd2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/Unsizable.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.exceptions.value; - -/** - * A Unsizable exception indicates that the value does not have a size. - * @since 1.0 - */ -public class Unsizable extends ValueException -{ - private static final long serialVersionUID = 741487155344252339L; - - public Unsizable( String message ) - { - super( message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/ValueException.java b/src/graiph-driver/java/org/neo4j/driver/exceptions/value/ValueException.java deleted file mode 100644 index 780867b9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/exceptions/value/ValueException.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.exceptions.value; - -import org.neo4j.driver.exceptions.ClientException; - -/** - * A ValueException indicates that the client has carried out an operation on values incorrectly. - * @since 1.0 - */ -public class ValueException extends ClientException -{ - private static final long serialVersionUID = -1269336313727174998L; - - public ValueException( String message ) - { - super( message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/AbstractStatementRunner.java b/src/graiph-driver/java/org/neo4j/driver/internal/AbstractStatementRunner.java deleted file mode 100644 index 59abbf01..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/AbstractStatementRunner.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.Map; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.StatementResult; -import org.neo4j.driver.StatementRunner; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.internal.util.Extract; -import org.neo4j.driver.internal.value.MapValue; -import org.neo4j.driver.types.TypeSystem; - -public abstract class AbstractStatementRunner implements StatementRunner -{ - @Override - public final StatementResult run( String statementTemplate, Value parameters ) - { - return run( new Statement( statementTemplate, parameters ) ); - } - - @Override - public final StatementResult run( String statementTemplate, Map statementParameters ) - { - return run( statementTemplate, parameters( statementParameters ) ); - } - - @Override - public final StatementResult run( String statementTemplate, Record statementParameters ) - { - return run( statementTemplate, parameters( statementParameters ) ); - } - - @Override - public final StatementResult run( String statementText ) - { - return run( statementText, Values.EmptyMap ); - } - - @Override - public final TypeSystem typeSystem() - { - return InternalTypeSystem.TYPE_SYSTEM; - } - - public static Value parameters( Record record ) - { - return record == null ? Values.EmptyMap : parameters( record.asMap() ); - } - - public static Value parameters( Map map ) - { - if ( map == null || map.isEmpty() ) - { - return Values.EmptyMap; - } - return new MapValue( Extract.mapOfValues( map ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/AsValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/AsValue.java deleted file mode 100644 index a51c638f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/AsValue.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import org.neo4j.driver.Value; - -public interface AsValue -{ - /** - * Retrieve a value representation of this - * - * @see Value - * @return {@link Value} that represents this - */ - Value asValue(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/BoltServerAddress.java b/src/graiph-driver/java/org/neo4j/driver/internal/BoltServerAddress.java deleted file mode 100644 index cdc314b0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/BoltServerAddress.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.URI; -import java.net.UnknownHostException; -import java.util.Objects; - -import org.neo4j.driver.net.ServerAddress; - -import static java.util.Objects.requireNonNull; - -/** - * Holds a host and port pair that denotes a Bolt server address. - */ -public class BoltServerAddress implements ServerAddress -{ - public static final int DEFAULT_PORT = 7687; - public static final BoltServerAddress LOCAL_DEFAULT = new BoltServerAddress( "localhost", DEFAULT_PORT ); - - private final String originalHost; // This keeps the original host name provided by the user. - private final String host; // This could either be the same as originalHost or it is an IP address resolved from the original host. - private final int port; - private final String stringValue; - - public BoltServerAddress( String address ) - { - this( uriFrom( address ) ); - } - - public BoltServerAddress( URI uri ) - { - this( hostFrom( uri ), portFrom( uri ) ); - } - - public BoltServerAddress( String host, int port ) - { - this( host, host, port ); - } - - public BoltServerAddress( String originalHost, String host, int port ) - { - this.originalHost = requireNonNull( originalHost, "original host" ); - this.host = requireNonNull( host, "host" ); - this.port = requireValidPort( port ); - this.stringValue = String.format( "%s:%d", host, port ); - } - - public static BoltServerAddress from( ServerAddress address ) - { - return address instanceof BoltServerAddress - ? (BoltServerAddress) address - : new BoltServerAddress( address.host(), address.port() ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - BoltServerAddress that = (BoltServerAddress) o; - return port == that.port && originalHost.equals( that.originalHost ) && host.equals( that.host ); - } - - @Override - public int hashCode() - { - return Objects.hash( originalHost, host, port ); - } - - @Override - public String toString() - { - return stringValue; - } - - /** - * Create a {@link SocketAddress} from this bolt address. This method always attempts to resolve the hostname into - * an {@link InetAddress}. - * - * @return new socket address. - * @see InetSocketAddress - */ - public SocketAddress toSocketAddress() - { - return new InetSocketAddress( host, port ); - } - - /** - * Resolve the host name down to an IP address, if not already resolved. 
- * - * @return this instance if already resolved, otherwise a new address instance - * @throws UnknownHostException if no IP address for the host could be found - * @see InetAddress#getByName(String) - */ - public BoltServerAddress resolve() throws UnknownHostException - { - String ipAddress = InetAddress.getByName( host ).getHostAddress(); - if ( ipAddress.equals( host ) ) - { - return this; - } - else - { - return new BoltServerAddress( host, ipAddress, port ); - } - } - - @Override - public String host() - { - return host; - } - - public String originalHost() - { - return originalHost; - } - - @Override - public int port() - { - return port; - } - - private static String hostFrom( URI uri ) - { - String host = uri.getHost(); - if ( host == null ) - { - throw invalidAddressFormat( uri ); - } - return host; - } - - private static int portFrom( URI uri ) - { - int port = uri.getPort(); - return port == -1 ? DEFAULT_PORT : port; - } - - private static URI uriFrom( String address ) - { - String scheme; - String hostPort; - - String[] schemeSplit = address.split( "://" ); - if ( schemeSplit.length == 1 ) - { - // URI can't parse addresses without scheme, prepend fake "bolt://" to reuse the parsing facility - scheme = "bolt://"; - hostPort = hostPortFrom( schemeSplit[0] ); - } - else if ( schemeSplit.length == 2 ) - { - scheme = schemeSplit[0] + "://"; - hostPort = hostPortFrom( schemeSplit[1] ); - } - else - { - throw invalidAddressFormat( address ); - } - - return URI.create( scheme + hostPort ); - } - - private static String hostPortFrom( String address ) - { - if ( address.startsWith( "[" ) ) - { - // expected to be an IPv6 address like [::1] or [::1]:7687 - return address; - } - - boolean containsSingleColon = address.indexOf( ":" ) == address.lastIndexOf( ":" ); - if ( containsSingleColon ) - { - // expected to be an IPv4 address with or without port like 127.0.0.1 or 127.0.0.1:7687 - return address; - } - - // address contains multiple colons and does not start with '[' - // expected to be an IPv6 address without brackets - return "[" + address + "]"; - } - - private static RuntimeException invalidAddressFormat( URI uri ) - { - return invalidAddressFormat( uri.toString() ); - } - - private static RuntimeException invalidAddressFormat( String address ) - { - return new IllegalArgumentException( "Invalid address format `" + address + "`" ); - } - - private static int requireValidPort( int port ) - { - if ( port >= 0 && port <= 65_535 ) - { - return port; - } - throw new IllegalArgumentException( "Illegal port: " + port ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/Bookmarks.java b/src/graiph-driver/java/org/neo4j/driver/internal/Bookmarks.java deleted file mode 100644 index de07adb3..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/Bookmarks.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.Value; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.neo4j.driver.internal.util.Iterables.newHashMapWithSize; -import static org.neo4j.driver.Values.value; - -public final class Bookmarks -{ - private static final String BOOKMARK_KEY = "bookmark"; - private static final String BOOKMARKS_KEY = "bookmarks"; - private static final String BOOKMARK_PREFIX = "neo4j:bookmark:v1:tx"; - - private static final long UNKNOWN_BOOKMARK_VALUE = -1; - - private static final Bookmarks EMPTY = new Bookmarks( Collections.emptySet() ); - - private final Iterable values; - private final String maxValue; - - private Bookmarks( Iterable values ) - { - this.values = values; - this.maxValue = maxBookmark( values ); - } - - public static Bookmarks empty() - { - return EMPTY; - } - - public static Bookmarks from( String value ) - { - if ( value == null ) - { - return empty(); - } - return from( singleton( value ) ); - } - - public static Bookmarks from( Iterable values ) - { - if ( values == null ) - { - return empty(); - } - return new Bookmarks( values ); - } - - public boolean isEmpty() - { - return maxValue == null; - } - - public String maxBookmarkAsString() - { - return maxValue; - } - - public Iterable values() - { - return values; - } - - public Map asBeginTransactionParameters() - { - if ( isEmpty() ) - { - return emptyMap(); - } - - // Driver sends {bookmark: "max", bookmarks: ["one", "two", "max"]} instead of simple - // {bookmarks: ["one", "two", "max"]} for backwards compatibility reasons. Old servers can only accept single - // bookmark that is why driver has to parse and compare given list of bookmarks. This functionality will - // eventually be removed. 
- Map parameters = newHashMapWithSize( 2 ); - parameters.put( BOOKMARK_KEY, value( maxValue ) ); - parameters.put( BOOKMARKS_KEY, value( values ) ); - return parameters; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - Bookmarks bookmarks = (Bookmarks) o; - return Objects.equals( values, bookmarks.values ) && - Objects.equals( maxValue, bookmarks.maxValue ); - } - - @Override - public int hashCode() - { - return Objects.hash( values, maxValue ); - } - - @Override - public String toString() - { - return "Bookmarks{values=" + values + "}"; - } - - private static String maxBookmark( Iterable bookmarks ) - { - if ( bookmarks == null ) - { - return null; - } - - Iterator iterator = bookmarks.iterator(); - - if ( !iterator.hasNext() ) - { - return null; - } - - String maxBookmark = iterator.next(); - long maxValue = bookmarkValue( maxBookmark ); - - while ( iterator.hasNext() ) - { - String bookmark = iterator.next(); - long value = bookmarkValue( bookmark ); - - if ( value > maxValue ) - { - maxBookmark = bookmark; - maxValue = value; - } - } - - return maxBookmark; - } - - private static long bookmarkValue( String value ) - { - if ( value != null && value.startsWith( BOOKMARK_PREFIX ) ) - { - try - { - return Long.parseLong( value.substring( BOOKMARK_PREFIX.length() ) ); - } - catch ( NumberFormatException e ) - { - return UNKNOWN_BOOKMARK_VALUE; - } - } - return UNKNOWN_BOOKMARK_VALUE; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/BookmarksHolder.java b/src/graiph-driver/java/org/neo4j/driver/internal/BookmarksHolder.java deleted file mode 100644 index 1bf492b6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/BookmarksHolder.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -public interface BookmarksHolder -{ - Bookmarks getBookmarks(); - - void setBookmarks( Bookmarks bookmarks ); - - String lastBookmark(); - - BookmarksHolder NO_OP = new BookmarksHolder() - { - @Override - public Bookmarks getBookmarks() - { - return Bookmarks.empty(); - } - - @Override - public void setBookmarks( Bookmarks bookmarks ) - { - } - - @Override - public String lastBookmark() - { - return null; - } - }; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/ConnectionSettings.java b/src/graiph-driver/java/org/neo4j/driver/internal/ConnectionSettings.java deleted file mode 100644 index b3a4f447..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/ConnectionSettings.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import org.neo4j.driver.AuthToken; -import org.neo4j.driver.Session; - -import static java.lang.String.format; - -/** - * The connection settings are used whenever a new connection is - * established to a server, specifically as part of the INIT request. - */ -public class ConnectionSettings -{ - private static final String DEFAULT_USER_AGENT = format( "neo4j-java/%s", driverVersion() ); - - /** - * Extracts the driver version from the driver jar MANIFEST.MF file. - */ - private static String driverVersion() - { - // "Session" is arbitrary - the only thing that matters is that the class we use here is in the - // 'org.neo4j.driver' package, because that is where the jar manifest specifies the version. - // This is done as part of the build, adding a MANIFEST.MF file to the generated jarfile. - Package pkg = Session.class.getPackage(); - if ( pkg != null && pkg.getImplementationVersion() != null ) - { - return pkg.getImplementationVersion(); - } - - // If there is no version, we're not running from a jar file, but from raw compiled class files. - // This should only happen during development, so call the version 'dev'. - return "dev"; - } - - private final AuthToken authToken; - private final String userAgent; - private final int connectTimeoutMillis; - - public ConnectionSettings( AuthToken authToken, String userAgent, int connectTimeoutMillis ) - { - this.authToken = authToken; - this.userAgent = userAgent; - this.connectTimeoutMillis = connectTimeoutMillis; - } - - public ConnectionSettings( AuthToken authToken, int connectTimeoutMillis ) - { - this( authToken, DEFAULT_USER_AGENT, connectTimeoutMillis ); - } - - public AuthToken authToken() - { - return authToken; - } - - public String userAgent() - { - return userAgent; - } - - public int connectTimeoutMillis() - { - return connectTimeoutMillis; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/DefaultBookmarksHolder.java b/src/graiph-driver/java/org/neo4j/driver/internal/DefaultBookmarksHolder.java deleted file mode 100644 index 04c0f810..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/DefaultBookmarksHolder.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -/** - * @since 2.0 - */ -public class DefaultBookmarksHolder implements BookmarksHolder -{ - private volatile Bookmarks bookmarks; - - public DefaultBookmarksHolder() - { - this( Bookmarks.empty() ); - } - - public DefaultBookmarksHolder( Bookmarks bookmarks ) - { - this.bookmarks = bookmarks; - } - - @Override - public Bookmarks getBookmarks() - { - return bookmarks; - } - - @Override - public void setBookmarks( Bookmarks bookmarks ) - { - if ( bookmarks != null && !bookmarks.isEmpty() ) - { - this.bookmarks = bookmarks; - } - } - - @Override - public String lastBookmark() - { - return bookmarks == null ? null : bookmarks.maxBookmarkAsString(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/DirectConnectionProvider.java b/src/graiph-driver/java/org/neo4j/driver/internal/DirectConnectionProvider.java deleted file mode 100644 index 329414c0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/DirectConnectionProvider.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.internal.async.connection.DecoratedConnection; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.internal.spi.ConnectionProvider; - -import static org.neo4j.driver.AccessMode.READ; -import static org.neo4j.driver.internal.messaging.request.MultiDatabaseUtil.ABSENT_DB_NAME; - -/** - * Simple {@link ConnectionProvider connection provider} that obtains connections form the given pool only for - * the given address. 
- */ -public class DirectConnectionProvider implements ConnectionProvider -{ - private final BoltServerAddress address; - private final ConnectionPool connectionPool; - - DirectConnectionProvider( BoltServerAddress address, ConnectionPool connectionPool ) - { - this.address = address; - this.connectionPool = connectionPool; - } - - @Override - public CompletionStage acquireConnection( String databaseName, AccessMode mode ) - { - return connectionPool.acquire( address ).thenApply( connection -> new DecoratedConnection( connection, databaseName, mode ) ); - } - - @Override - public CompletionStage verifyConnectivity() - { - // we verify the connection by establishing the connection to the default database - return acquireConnection( ABSENT_DB_NAME, READ ).thenCompose( Connection::release ); - } - - @Override - public CompletionStage close() - { - return connectionPool.close(); - } - - public BoltServerAddress getAddress() - { - return address; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/DriverFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/DriverFactory.java deleted file mode 100644 index e83a1671..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/DriverFactory.java +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import io.netty.bootstrap.Bootstrap; -import io.netty.util.concurrent.EventExecutorGroup; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.security.GeneralSecurityException; - -import org.neo4j.driver.internal.async.connection.BootstrapFactory; -import org.neo4j.driver.internal.async.connection.ChannelConnector; -import org.neo4j.driver.internal.async.connection.ChannelConnectorImpl; -import org.neo4j.driver.internal.async.pool.ConnectionPoolImpl; -import org.neo4j.driver.internal.async.pool.PoolSettings; -import org.neo4j.driver.internal.cluster.DnsResolver; -import org.neo4j.driver.internal.cluster.RoutingContext; -import org.neo4j.driver.internal.cluster.RoutingSettings; -import org.neo4j.driver.internal.cluster.loadbalancing.LeastConnectedLoadBalancingStrategy; -import org.neo4j.driver.internal.cluster.loadbalancing.LoadBalancer; -import org.neo4j.driver.internal.cluster.loadbalancing.LoadBalancingStrategy; -import org.neo4j.driver.internal.cluster.loadbalancing.RoundRobinLoadBalancingStrategy; -import org.neo4j.driver.internal.logging.NettyLogging; -import org.neo4j.driver.internal.metrics.InternalMetricsProvider; -import org.neo4j.driver.internal.metrics.MetricsProvider; -import org.neo4j.driver.internal.retry.ExponentialBackoffRetryLogic; -import org.neo4j.driver.internal.retry.RetryLogic; -import org.neo4j.driver.internal.retry.RetrySettings; -import org.neo4j.driver.internal.security.SecurityPlan; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.internal.spi.ConnectionProvider; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.AuthToken; -import org.neo4j.driver.AuthTokens; -import org.neo4j.driver.Config; -import org.neo4j.driver.Driver; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.net.ServerAddressResolver; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.metrics.MetricsProvider.METRICS_DISABLED_PROVIDER; -import static org.neo4j.driver.internal.security.SecurityPlan.insecure; - -public class DriverFactory -{ - public static final String BOLT_URI_SCHEME = "bolt"; - public static final String BOLT_ROUTING_URI_SCHEME = "neo4j"; - - public final Driver newInstance( URI uri, AuthToken authToken, RoutingSettings routingSettings, - RetrySettings retrySettings, Config config ) - { - authToken = authToken == null ? 
AuthTokens.none() : authToken; - - BoltServerAddress address = new BoltServerAddress( uri ); - RoutingSettings newRoutingSettings = routingSettings.withRoutingContext( new RoutingContext( uri ) ); - SecurityPlan securityPlan = createSecurityPlan( address, config ); - - InternalLoggerFactory.setDefaultFactory( new NettyLogging( config.logging() ) ); - Bootstrap bootstrap = createBootstrap(); - EventExecutorGroup eventExecutorGroup = bootstrap.config().group(); - RetryLogic retryLogic = createRetryLogic( retrySettings, eventExecutorGroup, config.logging() ); - - MetricsProvider metricsProvider = createDriverMetrics( config, createClock() ); - ConnectionPool connectionPool = createConnectionPool( authToken, securityPlan, bootstrap, metricsProvider, config ); - - InternalDriver driver = createDriver( uri, securityPlan, address, connectionPool, eventExecutorGroup, newRoutingSettings, retryLogic, metricsProvider, config ); - - verifyConnectivity( driver, connectionPool, config ); - - return driver; - } - - protected ConnectionPool createConnectionPool( AuthToken authToken, SecurityPlan securityPlan, Bootstrap bootstrap, - MetricsProvider metricsProvider, Config config ) - { - Clock clock = createClock(); - ConnectionSettings settings = new ConnectionSettings( authToken, config.connectionTimeoutMillis() ); - ChannelConnector connector = createConnector( settings, securityPlan, config, clock ); - PoolSettings poolSettings = new PoolSettings( config.maxConnectionPoolSize(), - config.connectionAcquisitionTimeoutMillis(), config.maxConnectionLifetimeMillis(), - config.idleTimeBeforeConnectionTest() - ); - return new ConnectionPoolImpl( connector, bootstrap, poolSettings, metricsProvider.metricsListener(), config.logging(), clock ); - } - - protected static MetricsProvider createDriverMetrics( Config config, Clock clock ) - { - if( config.isMetricsEnabled() ) - { - return new InternalMetricsProvider( clock ); - } - else - { - return METRICS_DISABLED_PROVIDER; - } - } - - protected ChannelConnector createConnector( ConnectionSettings settings, SecurityPlan securityPlan, - Config config, Clock clock ) - { - return new ChannelConnectorImpl( settings, securityPlan, config.logging(), clock ); - } - - private InternalDriver createDriver( URI uri, SecurityPlan securityPlan, BoltServerAddress address, ConnectionPool connectionPool, - EventExecutorGroup eventExecutorGroup, RoutingSettings routingSettings, RetryLogic retryLogic, MetricsProvider metricsProvider, Config config ) - { - try - { - String scheme = uri.getScheme().toLowerCase(); - switch ( scheme ) - { - case BOLT_URI_SCHEME: - assertNoRoutingContext( uri, routingSettings ); - return createDirectDriver( securityPlan, address, connectionPool, retryLogic, metricsProvider, config ); - case BOLT_ROUTING_URI_SCHEME: - return createRoutingDriver( securityPlan, address, connectionPool, eventExecutorGroup, routingSettings, retryLogic, metricsProvider, config ); - default: - throw new ClientException( format( "Unsupported URI scheme: %s", scheme ) ); - } - } - catch ( Throwable driverError ) - { - // we need to close the connection pool if driver creation threw exception - closeConnectionPoolAndSuppressError( connectionPool, driverError ); - throw driverError; - } - } - - /** - * Creates a new driver for "bolt" scheme. - *
- * This method is protected only for testing - */ - protected InternalDriver createDirectDriver( SecurityPlan securityPlan, BoltServerAddress address, ConnectionPool connectionPool, RetryLogic retryLogic, - MetricsProvider metricsProvider, Config config ) - { - ConnectionProvider connectionProvider = new DirectConnectionProvider( address, connectionPool ); - SessionFactory sessionFactory = createSessionFactory( connectionProvider, retryLogic, config ); - InternalDriver driver = createDriver( securityPlan, sessionFactory, metricsProvider, config ); - Logger log = config.logging().getLog( Driver.class.getSimpleName() ); - log.info( "Direct driver instance %s created for server address %s", driver.hashCode(), address ); - return driver; - } - - /** - * Creates new a new driver for "neo4j" scheme. - *
- * This method is protected only for testing - */ - protected InternalDriver createRoutingDriver( SecurityPlan securityPlan, BoltServerAddress address, ConnectionPool connectionPool, - EventExecutorGroup eventExecutorGroup, RoutingSettings routingSettings, RetryLogic retryLogic, MetricsProvider metricsProvider, Config config ) - { - if ( !securityPlan.isRoutingCompatible() ) - { - throw new IllegalArgumentException( "The chosen security plan is not compatible with a routing driver" ); - } - ConnectionProvider connectionProvider = createLoadBalancer( address, connectionPool, eventExecutorGroup, - config, routingSettings ); - SessionFactory sessionFactory = createSessionFactory( connectionProvider, retryLogic, config ); - InternalDriver driver = createDriver( securityPlan, sessionFactory, metricsProvider, config ); - Logger log = config.logging().getLog( Driver.class.getSimpleName() ); - log.info( "Routing driver instance %s created for server address %s", driver.hashCode(), address ); - return driver; - } - - /** - * Creates new {@link Driver}. - *
- * This method is protected only for testing - */ - protected InternalDriver createDriver( SecurityPlan securityPlan, SessionFactory sessionFactory, MetricsProvider metricsProvider, Config config ) - { - return new InternalDriver( securityPlan, sessionFactory, metricsProvider, config.logging() ); - } - - /** - * Creates new {@link LoadBalancer} for the routing driver. - *
- * This method is protected only for testing - */ - protected LoadBalancer createLoadBalancer( BoltServerAddress address, ConnectionPool connectionPool, - EventExecutorGroup eventExecutorGroup, Config config, RoutingSettings routingSettings ) - { - LoadBalancingStrategy loadBalancingStrategy = createLoadBalancingStrategy( config, connectionPool ); - ServerAddressResolver resolver = createResolver( config ); - return new LoadBalancer( address, routingSettings, connectionPool, eventExecutorGroup, createClock(), - config.logging(), loadBalancingStrategy, resolver ); - } - - private static LoadBalancingStrategy createLoadBalancingStrategy( Config config, - ConnectionPool connectionPool ) - { - switch ( config.loadBalancingStrategy() ) - { - case ROUND_ROBIN: - return new RoundRobinLoadBalancingStrategy( config.logging() ); - case LEAST_CONNECTED: - return new LeastConnectedLoadBalancingStrategy( connectionPool, config.logging() ); - default: - throw new IllegalArgumentException( "Unknown load balancing strategy: " + config.loadBalancingStrategy() ); - } - } - - private static ServerAddressResolver createResolver( Config config ) - { - ServerAddressResolver configuredResolver = config.resolver(); - return configuredResolver != null ? configuredResolver : new DnsResolver( config.logging() ); - } - - /** - * Creates new {@link Clock}. - *
- * This method is protected only for testing - */ - protected Clock createClock() - { - return Clock.SYSTEM; - } - - /** - * Creates new {@link SessionFactory}. - *
- * This method is protected only for testing - */ - protected SessionFactory createSessionFactory( ConnectionProvider connectionProvider, RetryLogic retryLogic, - Config config ) - { - return new SessionFactoryImpl( connectionProvider, retryLogic, config ); - } - - /** - * Creates new {@link RetryLogic}. - *
- * This method is protected only for testing - */ - protected RetryLogic createRetryLogic( RetrySettings settings, EventExecutorGroup eventExecutorGroup, - Logging logging ) - { - return new ExponentialBackoffRetryLogic( settings, eventExecutorGroup, createClock(), logging ); - } - - /** - * Creates new {@link Bootstrap}. - *
- * This method is protected only for testing - */ - protected Bootstrap createBootstrap() - { - return BootstrapFactory.newBootstrap(); - } - - private static SecurityPlan createSecurityPlan( BoltServerAddress address, Config config ) - { - try - { - return createSecurityPlanImpl( address, config ); - } - catch ( GeneralSecurityException | IOException ex ) - { - throw new ClientException( "Unable to establish SSL parameters", ex ); - } - } - - /* - * Establish a complete SecurityPlan based on the details provided for - * driver construction. - */ - @SuppressWarnings( "deprecation" ) - private static SecurityPlan createSecurityPlanImpl( BoltServerAddress address, Config config ) - throws GeneralSecurityException, IOException - { - if ( config.encrypted() ) - { - Logger logger = config.logging().getLog( "SecurityPlan" ); - Config.TrustStrategy trustStrategy = config.trustStrategy(); - boolean hostnameVerificationEnabled = trustStrategy.isHostnameVerificationEnabled(); - switch ( trustStrategy.strategy() ) - { - case TRUST_ON_FIRST_USE: - logger.warn( - "Option `TRUST_ON_FIRST_USE` has been deprecated and will be removed in a future " + - "version of the driver. Please switch to use `TRUST_ALL_CERTIFICATES` instead." ); - return SecurityPlan.forTrustOnFirstUse( trustStrategy.certFile(), hostnameVerificationEnabled, address, logger ); - case TRUST_SIGNED_CERTIFICATES: - logger.warn( - "Option `TRUST_SIGNED_CERTIFICATE` has been deprecated and will be removed in a future " + - "version of the driver. Please switch to use `TRUST_CUSTOM_CA_SIGNED_CERTIFICATES` instead." ); - // intentional fallthrough - - case TRUST_CUSTOM_CA_SIGNED_CERTIFICATES: - return SecurityPlan.forCustomCASignedCertificates( trustStrategy.certFile(), hostnameVerificationEnabled ); - case TRUST_SYSTEM_CA_SIGNED_CERTIFICATES: - return SecurityPlan.forSystemCASignedCertificates( hostnameVerificationEnabled ); - case TRUST_ALL_CERTIFICATES: - return SecurityPlan.forAllCertificates( hostnameVerificationEnabled ); - default: - throw new ClientException( - "Unknown TLS authentication strategy: " + trustStrategy.strategy().name() ); - } - } - else - { - return insecure(); - } - } - - private static void assertNoRoutingContext( URI uri, RoutingSettings routingSettings ) - { - RoutingContext routingContext = routingSettings.routingContext(); - if ( routingContext.isDefined() ) - { - throw new IllegalArgumentException( - "Routing parameters are not supported with scheme 'bolt'. Given URI: '" + uri + "'" ); - } - } - - private static void verifyConnectivity( InternalDriver driver, ConnectionPool connectionPool, Config config ) - { - try - { - // block to verify connectivity, close connection pool if thread gets interrupted - Futures.blockingGet( driver.verifyConnectivity(), - () -> closeConnectionPoolOnThreadInterrupt( connectionPool, config.logging() ) ); - } - catch ( Throwable connectionError ) - { - if ( Thread.currentThread().isInterrupted() ) - { - // current thread has been interrupted while verifying connectivity - // connection pool should've been closed - throw new ServiceUnavailableException( "Unable to create driver. 
Thread has been interrupted.", - connectionError ); - } - - // we need to close the connection pool if driver creation threw exception - closeConnectionPoolAndSuppressError( connectionPool, connectionError ); - throw connectionError; - } - } - - private static void closeConnectionPoolAndSuppressError( ConnectionPool connectionPool, Throwable mainError ) - { - try - { - Futures.blockingGet( connectionPool.close() ); - } - catch ( Throwable closeError ) - { - if ( mainError != closeError ) - { - mainError.addSuppressed( closeError ); - } - } - } - - private static void closeConnectionPoolOnThreadInterrupt( ConnectionPool pool, Logging logging ) - { - Logger log = logging.getLog( Driver.class.getSimpleName() ); - log.warn( "Driver creation interrupted while verifying connectivity. Connection pool will be closed" ); - pool.close(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/FailableCursor.java b/src/graiph-driver/java/org/neo4j/driver/internal/FailableCursor.java deleted file mode 100644 index b650a218..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/FailableCursor.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.concurrent.CompletionStage; - -public interface FailableCursor -{ - CompletionStage failureAsync(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalDriver.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalDriver.java deleted file mode 100644 index 9880cf2d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalDriver.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; - -import org.neo4j.driver.Driver; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.Metrics; -import org.neo4j.driver.Session; -import org.neo4j.driver.SessionParametersTemplate; -import org.neo4j.driver.async.AsyncSession; -import org.neo4j.driver.internal.async.InternalAsyncSession; -import org.neo4j.driver.internal.async.NetworkSession; -import org.neo4j.driver.internal.metrics.MetricsProvider; -import org.neo4j.driver.internal.reactive.InternalRxSession; -import org.neo4j.driver.internal.security.SecurityPlan; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.reactive.RxSession; - -import static org.neo4j.driver.internal.util.Futures.completedWithNull; - -public class InternalDriver implements Driver -{ - private final SecurityPlan securityPlan; - private final SessionFactory sessionFactory; - private final Logger log; - - private AtomicBoolean closed = new AtomicBoolean( false ); - private final MetricsProvider metricsProvider; - - InternalDriver( SecurityPlan securityPlan, SessionFactory sessionFactory, MetricsProvider metricsProvider, Logging logging ) - { - this.securityPlan = securityPlan; - this.sessionFactory = sessionFactory; - this.metricsProvider = metricsProvider; - this.log = logging.getLog( Driver.class.getSimpleName() ); - } - - @Override - public Session session() - { - return new InternalSession( newSession( SessionParameters.empty() ) ); - } - - @Override - public Session session( Consumer templateConsumer ) - { - SessionParameters.Template template = SessionParameters.template(); - templateConsumer.accept( template ); - return new InternalSession( newSession( template.build() ) ); - } - - @Override - public RxSession rxSession() - { - return new InternalRxSession( newSession( SessionParameters.empty() ) ); - } - - @Override - public RxSession rxSession( Consumer templateConsumer ) - { - SessionParameters.Template template = SessionParameters.template(); - templateConsumer.accept( template ); - return new InternalRxSession( newSession( template.build() ) ); - } - - @Override - public AsyncSession asyncSession() - { - return new InternalAsyncSession( newSession( SessionParameters.empty() ) ); - } - - @Override - public AsyncSession asyncSession( Consumer templateConsumer ) - { - SessionParameters.Template template = SessionParameters.template(); - templateConsumer.accept( template ); - return new InternalAsyncSession( newSession( template.build() ) ); - } - - @Override - public Metrics metrics() - { - return metricsProvider.metrics(); - } - - @Override - public boolean isEncrypted() - { - assertOpen(); - return securityPlan.requiresEncryption(); - } - - @Override - public void close() - { - Futures.blockingGet( closeAsync() ); - } - - @Override - public CompletionStage closeAsync() - { - if ( closed.compareAndSet( false, true ) ) - { - log.info( "Closing driver instance %s", hashCode() ); - return sessionFactory.close(); - } - return completedWithNull(); - } - - public CompletionStage verifyConnectivity() - { - return sessionFactory.verifyConnectivity(); - } - - /** - * Get the underlying session factory. - *
- * This method is only for testing - * - * @return the session factory used by this driver. - */ - public SessionFactory getSessionFactory() - { - return sessionFactory; - } - - private static RuntimeException driverCloseException() - { - return new IllegalStateException( "This driver instance has already been closed" ); - } - - public NetworkSession newSession( SessionParameters parameters ) - { - assertOpen(); - NetworkSession session = sessionFactory.newInstance( parameters ); - if ( closed.get() ) - { - // session does not immediately acquire connection, it is fine to just throw - throw driverCloseException(); - } - return session; - } - - private void assertOpen() - { - if ( closed.get() ) - { - throw driverCloseException(); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalEntity.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalEntity.java deleted file mode 100644 index ae3750fd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalEntity.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.Map; - -import org.neo4j.driver.internal.util.Extract; -import org.neo4j.driver.internal.util.Iterables; -import org.neo4j.driver.internal.value.MapValue; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.types.Entity; -import java.util.function.Function; - -import static org.neo4j.driver.Values.ofObject; - -public abstract class InternalEntity implements Entity, AsValue -{ - private final long id; - private final Map properties; - - public InternalEntity( long id, Map properties ) - { - this.id = id; - this.properties = properties; - } - - @Override - public long id() - { - return id; - } - - @Override - public int size() - { - return properties.size(); - } - - @Override - public Map asMap() - { - return asMap( ofObject() ); - } - - @Override - public Map asMap( Function mapFunction ) - { - return Extract.map( properties, mapFunction ); - } - - @Override - public Value asValue() - { - return new MapValue( properties ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - InternalEntity that = (InternalEntity) o; - - return id == that.id; - - } - - @Override - public int hashCode() - { - return (int)(id ^ (id >>> 32)); - } - - @Override - public String toString() - { - return "Entity{" + - "id=" + id + - ", properties=" + properties + - '}'; - } - - @Override - public boolean containsKey( String key ) - { - return properties.containsKey( key ); - } - - @Override - public Iterable keys() - { - return properties.keySet(); - } - - @Override - public Value get( String key ) - { - Value value = properties.get( key ); - return value == null ? 
Values.NULL : value; - } - - @Override - public Iterable values() - { - return properties.values(); - } - - @Override - public Iterable values( Function mapFunction ) - { - return Iterables.map( properties.values(), mapFunction ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalIsoDuration.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalIsoDuration.java deleted file mode 100644 index 81198291..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalIsoDuration.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.time.Duration; -import java.time.Period; -import java.time.temporal.Temporal; -import java.time.temporal.TemporalUnit; -import java.time.temporal.UnsupportedTemporalTypeException; -import java.util.List; -import java.util.Objects; - -import org.neo4j.driver.types.IsoDuration; - -import static java.time.temporal.ChronoUnit.DAYS; -import static java.time.temporal.ChronoUnit.MONTHS; -import static java.time.temporal.ChronoUnit.NANOS; -import static java.time.temporal.ChronoUnit.SECONDS; -import static java.util.Arrays.asList; -import static java.util.Collections.unmodifiableList; - -public class InternalIsoDuration implements IsoDuration -{ - private static final long NANOS_PER_SECOND = 1_000_000_000; - private static final List SUPPORTED_UNITS = unmodifiableList( asList( MONTHS, DAYS, SECONDS, NANOS ) ); - - private final long months; - private final long days; - private final long seconds; - private final int nanoseconds; - - public InternalIsoDuration( Period period ) - { - this( period.toTotalMonths(), period.getDays(), Duration.ZERO ); - } - - public InternalIsoDuration( Duration duration ) - { - this( 0, 0, duration ); - } - - public InternalIsoDuration( long months, long days, long seconds, int nanoseconds ) - { - this( months, days, Duration.ofSeconds( seconds, nanoseconds ) ); - } - - InternalIsoDuration( long months, long days, Duration duration ) - { - this.months = months; - this.days = days; - this.seconds = duration.getSeconds(); // normalized value of seconds - this.nanoseconds = duration.getNano(); // normalized value of nanoseconds in [0, 999_999_999] - } - - @Override - public long months() - { - return months; - } - - @Override - public long days() - { - return days; - } - - @Override - public long seconds() - { - return seconds; - } - - @Override - public int nanoseconds() - { - return nanoseconds; - } - - @Override - public long get( TemporalUnit unit ) - { - if ( unit == MONTHS ) - { - return months; - } - else if ( unit == DAYS ) - { - return days; - } - else if ( unit == SECONDS ) - { - return seconds; - } - else if ( unit == NANOS ) - { - return nanoseconds; - } - else - { - throw new UnsupportedTemporalTypeException( "Unsupported unit: " + unit ); - } - } - - @Override - public List 
getUnits() - { - return SUPPORTED_UNITS; - } - - @Override - public Temporal addTo( Temporal temporal ) - { - if ( months != 0 ) - { - temporal = temporal.plus( months, MONTHS ); - } - if ( days != 0 ) - { - temporal = temporal.plus( days, DAYS ); - } - if ( seconds != 0 ) - { - temporal = temporal.plus( seconds, SECONDS ); - } - if ( nanoseconds != 0 ) - { - temporal = temporal.plus( nanoseconds, NANOS ); - } - return temporal; - } - - @Override - public Temporal subtractFrom( Temporal temporal ) - { - if ( months != 0 ) - { - temporal = temporal.minus( months, MONTHS ); - } - if ( days != 0 ) - { - temporal = temporal.minus( days, DAYS ); - } - if ( seconds != 0 ) - { - temporal = temporal.minus( seconds, SECONDS ); - } - if ( nanoseconds != 0 ) - { - temporal = temporal.minus( nanoseconds, NANOS ); - } - return temporal; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - InternalIsoDuration that = (InternalIsoDuration) o; - return months == that.months && - days == that.days && - seconds == that.seconds && - nanoseconds == that.nanoseconds; - } - - @Override - public int hashCode() - { - return Objects.hash( months, days, seconds, nanoseconds ); - } - - @Override - public String toString() - { - StringBuilder sb = new StringBuilder(); - sb.append( 'P' ); - sb.append( months ).append( 'M' ); - sb.append( days ).append( 'D' ); - sb.append( 'T' ); - if ( seconds < 0 && nanoseconds > 0 ) - { - if ( seconds == -1 ) - { - sb.append( "-0" ); - } - else - { - sb.append( seconds + 1 ); - } - } - else - { - sb.append( seconds ); - } - if ( nanoseconds > 0 ) - { - int pos = sb.length(); - // append nanoseconds as a 10-digit string with leading '1' that is later replaced by a '.' - if ( seconds < 0 ) - { - sb.append( 2 * NANOS_PER_SECOND - nanoseconds ); - } - else - { - sb.append( NANOS_PER_SECOND + nanoseconds ); - } - sb.setCharAt( pos, '.' ); // replace '1' with '.' - } - sb.append( 'S' ); - return sb.toString(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalNode.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalNode.java deleted file mode 100644 index c7721cde..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalNode.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.Collection; -import java.util.Collections; -import java.util.Map; - -import org.neo4j.driver.internal.value.NodeValue; -import org.neo4j.driver.types.Node; -import org.neo4j.driver.Value; - -/** - * {@link Node} implementation that directly contains labels and properties. 
- */ -public class InternalNode extends InternalEntity implements Node -{ - private final Collection labels; - - public InternalNode( long id ) - { - this( id, Collections.emptyList(), Collections.emptyMap() ); - } - - public InternalNode( long id, Collection labels, Map properties ) - { - super( id, properties ); - this.labels = labels; - } - - @Override - public Collection labels() - { - return labels; - } - - @Override - public boolean hasLabel( String label ) - { - return labels.contains( label ); - } - - @Override - public Value asValue() - { - return new NodeValue( this ); - } - - @Override - public String toString() - { - return String.format( "node<%s>", id() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPair.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalPair.java deleted file mode 100644 index 50cba797..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPair.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.Objects; - -import org.neo4j.driver.util.Pair; - -public class InternalPair implements Pair -{ - private final K key; - private final V value; - - protected InternalPair( K key, V value ) - { - Objects.requireNonNull( key ); - Objects.requireNonNull( value ); - this.key = key; - this.value = value; - } - - public K key() - { - return key; - } - - public V value() - { - return value; - } - - public static Pair of( K key, V value ) - { - return new InternalPair<>( key, value ); - } - - @Override - public String toString() - { - return String.format( "%s: %s", Objects.toString( key ), Objects.toString( value ) ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - InternalPair that = (InternalPair) o; - - return key.equals( that.key ) && value.equals( that.value ); - } - - @Override - public int hashCode() - { - int result = key.hashCode(); - result = 31 * result + value.hashCode(); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPath.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalPath.java deleted file mode 100644 index e5c6ffbe..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPath.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -import org.neo4j.driver.internal.value.PathValue; -import org.neo4j.driver.types.Entity; -import org.neo4j.driver.types.Node; -import org.neo4j.driver.types.Path; -import org.neo4j.driver.types.Relationship; -import org.neo4j.driver.Value; - -/** - * {@link Path} implementation that directly contains all nodes and relationships. - */ -public class InternalPath implements Path, AsValue -{ - public static class SelfContainedSegment implements Segment - { - private final Node start; - private final Relationship relationship; - private final Node end; - - public SelfContainedSegment( Node start, Relationship relationship, Node end ) - { - this.start = start; - this.relationship = relationship; - this.end = end; - } - - @Override - public Node start() - { - return start; - } - - @Override - public Relationship relationship() - { - return relationship; - } - - @Override - public Node end() - { - return end; - } - - @Override - public boolean equals( Object other ) - { - if ( this == other ) - { - return true; - } - if ( other == null || getClass() != other.getClass() ) - { - return false; - } - - SelfContainedSegment that = (SelfContainedSegment) other; - return start.equals( that.start ) && end.equals( that.end ) && relationship.equals( that.relationship ); - - } - - @Override - public int hashCode() - { - int result = start.hashCode(); - result = 31 * result + relationship.hashCode(); - result = 31 * result + end.hashCode(); - return result; - } - - @Override - public String toString() - { - return String.format( relationship.startNodeId() == start.id() ? 
- "(%s)-[%s:%s]->(%s)" : "(%s)<-[%s:%s]-(%s)", - start.id(), relationship.id(), relationship.type(), end.id() ); - } - } - - private static boolean isEndpoint( Node node, Relationship relationship ) - { - return node.id() == relationship.startNodeId() || node.id() == relationship.endNodeId(); - } - - private final List nodes; - private final List relationships; - private final List segments; - - public InternalPath( List alternatingNodeAndRel ) - { - nodes = newList( alternatingNodeAndRel.size() / 2 + 1 ); - relationships = newList( alternatingNodeAndRel.size() / 2 ); - segments = newList( alternatingNodeAndRel.size() / 2 ); - - if ( alternatingNodeAndRel.size() % 2 == 0 ) - { - throw new IllegalArgumentException( "An odd number of entities are required to build a path" ); - } - Node lastNode = null; - Relationship lastRelationship = null; - int index = 0; - for ( Entity entity : alternatingNodeAndRel ) - { - if ( entity == null ) - { - throw new IllegalArgumentException( "Path entities cannot be null" ); - } - if ( index % 2 == 0 ) - { - // even index - this should be a node - try - { - lastNode = (Node) entity; - if ( nodes.isEmpty() || isEndpoint( lastNode, lastRelationship ) ) - { - nodes.add( lastNode ); - } - else - { - throw new IllegalArgumentException( - "Node argument " + index + " is not an endpoint of relationship argument " + (index - - 1) ); - } - } - catch ( ClassCastException e ) - { - String cls = entity.getClass().getName(); - throw new IllegalArgumentException( - "Expected argument " + index + " to be a node " + index + " but found a " + cls + " " + - "instead" ); - } - } - else - { - // odd index - this should be a relationship - try - { - lastRelationship = (Relationship) entity; - if ( isEndpoint( lastNode, lastRelationship ) ) - { - relationships.add( lastRelationship ); - } - else - { - throw new IllegalArgumentException( - "Node argument " + (index - 1) + " is not an endpoint of relationship argument " + - index ); - } - } - catch ( ClassCastException e ) - { - String cls = entity.getClass().getName(); - throw new IllegalArgumentException( - "Expected argument " + index + " to be a relationship but found a " + cls + " instead" ); - } - } - index += 1; - } - buildSegments(); - } - - public InternalPath( Entity... alternatingNodeAndRel ) - { - this( Arrays.asList( alternatingNodeAndRel ) ); - } - - public InternalPath( List segments, List nodes, List relationships ) - { - this.segments = segments; - this.nodes = nodes; - this.relationships = relationships; - } - - private List newList( int size ) - { - return size == 0 ? 
Collections.emptyList() : new ArrayList( size ); - } - - @Override - public int length() - { - return relationships.size(); - } - - @Override - public boolean contains( Node node ) - { - return nodes.contains( node ); - } - - @Override - public boolean contains( Relationship relationship ) - { - return relationships.contains( relationship ); - } - - @Override - public Iterable nodes() - { - return nodes; - } - - @Override - public Iterable relationships() - { - return relationships; - } - - @Override - public Node start() - { - return nodes.get( 0 ); - } - - @Override - public Node end() - { - return nodes.get( nodes.size() - 1 ); - } - - @Override - public Iterator iterator() - { - return segments.iterator(); - } - - @Override - public Value asValue() - { - return new PathValue( this ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - InternalPath segments1 = (InternalPath) o; - - return segments.equals( segments1.segments ); - - } - - @Override - public int hashCode() - { - return segments.hashCode(); - } - - @Override - public String toString() - { - - return "path" + segments; - } - - private void buildSegments() - { - for ( int i = 0; i < relationships.size(); i++ ) - { - segments.add( new SelfContainedSegment( nodes.get( i ), relationships.get( i ), nodes.get( i + 1 ) ) ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPoint2D.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalPoint2D.java deleted file mode 100644 index b35a500e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPoint2D.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.Objects; - -import org.neo4j.driver.types.Point; - -public class InternalPoint2D implements Point -{ - private final int srid; - private final double x; - private final double y; - - public InternalPoint2D( int srid, double x, double y ) - { - this.srid = srid; - this.x = x; - this.y = y; - } - - @Override - public int srid() - { - return srid; - } - - @Override - public double x() - { - return x; - } - - @Override - public double y() - { - return y; - } - - @Override - public double z() - { - return Double.NaN; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - InternalPoint2D that = (InternalPoint2D) o; - return srid == that.srid && - Double.compare( that.x, x ) == 0 && - Double.compare( that.y, y ) == 0; - } - - @Override - public int hashCode() - { - return Objects.hash( srid, x, y ); - } - - @Override - public String toString() - { - return "Point{" + - "srid=" + srid + - ", x=" + x + - ", y=" + y + - '}'; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPoint3D.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalPoint3D.java deleted file mode 100644 index a5aabb62..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalPoint3D.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.Objects; - -import org.neo4j.driver.types.Point; - -public class InternalPoint3D implements Point -{ - private final int srid; - private final double x; - private final double y; - private final double z; - - public InternalPoint3D( int srid, double x, double y, double z ) - { - this.srid = srid; - this.x = x; - this.y = y; - this.z = z; - } - - @Override - public int srid() - { - return srid; - } - - @Override - public double x() - { - return x; - } - - @Override - public double y() - { - return y; - } - - @Override - public double z() - { - return z; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - InternalPoint3D that = (InternalPoint3D) o; - return srid == that.srid && - Double.compare( that.x, x ) == 0 && - Double.compare( that.y, y ) == 0 && - Double.compare( that.z, z ) == 0; - } - - @Override - public int hashCode() - { - return Objects.hash( srid, x, y, z ); - } - - @Override - public String toString() - { - return "Point{" + - "srid=" + srid + - ", x=" + x + - ", y=" + y + - ", z=" + z + - '}'; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalRecord.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalRecord.java deleted file mode 100644 index fe0f9a5b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalRecord.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; - -import org.neo4j.driver.internal.types.InternalMapAccessorWithDefaultValue; -import org.neo4j.driver.internal.util.Extract; -import org.neo4j.driver.Record; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import java.util.function.Function; -import org.neo4j.driver.util.Pair; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.util.Format.formatPairs; -import static org.neo4j.driver.Values.ofObject; -import static org.neo4j.driver.Values.ofValue; - -public class InternalRecord extends InternalMapAccessorWithDefaultValue implements Record -{ - private final List keys; - private final Value[] values; - private int hashCode = 0; - - public InternalRecord( List keys, Value[] values ) - { - this.keys = keys; - this.values = values; - } - - @Override - public List keys() - { - return keys; - } - - @Override - public List values() - { - return Arrays.asList( values ); - } - - @Override - public List> fields() - { - return Extract.fields( this, ofValue() ); - } - - @Override - public int index( String key ) - { - int result = keys.indexOf( key ); - if ( result == -1 ) - { - throw new NoSuchElementException( "Unknown key: " + key ); - } - else - { - return result; - } - } - - @Override - public boolean containsKey( String key ) - { - return keys.contains( key ); - } - - @Override - public Value get( String key ) - { - int fieldIndex = keys.indexOf( key ); - - if ( fieldIndex == -1 ) - { - return Values.NULL; - } - else - { - return values[fieldIndex]; - } - } - - @Override - public Value get( int index ) - { - return index >= 0 && index < values.length ? values[index] : Values.NULL; - } - - @Override - public int size() - { - return values.length; - } - - @Override - public Map asMap() - { - return Extract.map( this, ofObject() ); - } - - @Override - public Map asMap( Function mapper ) - { - return Extract.map( this, mapper ); - } - - @Override - public String toString() - { - return format( "Record<%s>", formatPairs( asMap( ofValue() ) ) ); - } - - @Override - public boolean equals( Object other ) - { - if ( this == other ) - { - return true; - } - else if ( other instanceof Record ) - { - Record otherRecord = (Record) other; - int size = size(); - if ( ! ( size == otherRecord.size() ) ) - { - return false; - } - if ( ! keys.equals( otherRecord.keys() ) ) - { - return false; - } - for ( int i = 0; i < size; i++ ) - { - Value value = get( i ); - Value otherValue = otherRecord.get( i ); - if ( ! value.equals( otherValue ) ) - { - return false; - } - } - return true; - } - else - { - return false; - } - } - - @Override - public int hashCode() - { - if ( hashCode == 0 ) - { - hashCode = 31 * keys.hashCode() + Arrays.hashCode( values ); - } - return hashCode; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalRelationship.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalRelationship.java deleted file mode 100644 index 60dd5d8f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalRelationship.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.Collections; -import java.util.Map; - -import org.neo4j.driver.internal.value.RelationshipValue; -import org.neo4j.driver.types.Relationship; -import org.neo4j.driver.Value; - -/** - * {@link Relationship} implementation that directly contains type and properties. - */ -public class InternalRelationship extends InternalEntity implements Relationship -{ - private long start; - private long end; - private final String type; - - public InternalRelationship( long id, long start, long end, String type ) - { - this( id, start, end, type, Collections.emptyMap() ); - } - - public InternalRelationship( long id, long start, long end, String type, - Map properties ) - { - super( id, properties ); - this.start = start; - this.end = end; - this.type = type; - } - - @Override - public boolean hasType( String relationshipType ) - { - return type().equals( relationshipType ); - } - - /** Modify the start/end identities of this relationship */ - public void setStartAndEnd( long start, long end ) - { - this.start = start; - this.end = end; - } - - @Override - public long startNodeId() - { - return start; - } - - @Override - public long endNodeId() - { - return end; - } - - @Override - public String type() - { - return type; - } - - @Override - public Value asValue() - { - return new RelationshipValue( this ); - } - - @Override - public String toString() - { - return String.format( "relationship<%s>", id() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalSession.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalSession.java deleted file mode 100644 index 5c937c8f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalSession.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal; - -import java.util.Map; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Session; -import org.neo4j.driver.Statement; -import org.neo4j.driver.StatementResult; -import org.neo4j.driver.Transaction; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.TransactionWork; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.async.NetworkSession; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; - -import static java.util.Collections.emptyMap; - -public class InternalSession extends AbstractStatementRunner implements Session -{ - private final NetworkSession session; - - public InternalSession( NetworkSession session ) - { - this.session = session; - } - - @Override - public StatementResult run( Statement statement ) - { - return run( statement, TransactionConfig.empty() ); - } - - @Override - public StatementResult run( String statement, TransactionConfig config ) - { - return run( statement, emptyMap(), config ); - } - - @Override - public StatementResult run( String statement, Map parameters, TransactionConfig config ) - { - return run( new Statement( statement, parameters ), config ); - } - - @Override - public StatementResult run( Statement statement, TransactionConfig config ) - { - StatementResultCursor cursor = Futures.blockingGet( session.runAsync( statement, config, false ), - () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while running query in session" ) ); - - // query executed, it is safe to obtain a connection in a blocking way - Connection connection = Futures.getNow( session.connectionAsync() ); - return new InternalStatementResult( connection, cursor ); - } - - @Override - public boolean isOpen() - { - return session.isOpen(); - } - - @Override - public void close() - { - Futures.blockingGet( session.closeAsync(), () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while closing the session" ) ); - } - - @Override - public Transaction beginTransaction() - { - return beginTransaction( TransactionConfig.empty() ); - } - - @Override - public Transaction beginTransaction( TransactionConfig config ) - { - ExplicitTransaction tx = Futures.blockingGet( session.beginTransactionAsync( config ), - () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while starting a transaction" ) ); - return new InternalTransaction( tx ); - } - - @Override - public T readTransaction( TransactionWork work ) - { - return readTransaction( work, TransactionConfig.empty() ); - } - - @Override - public T readTransaction( TransactionWork work, TransactionConfig config ) - { - return transaction( AccessMode.READ, work, config ); - } - - @Override - public T writeTransaction( TransactionWork work ) - { - return writeTransaction( work, TransactionConfig.empty() ); - } - - @Override - public T writeTransaction( TransactionWork work, TransactionConfig config ) - { - return transaction( AccessMode.WRITE, work, config ); - } - - @Override - public String lastBookmark() - { - return session.lastBookmark(); - } - - @Override - @SuppressWarnings( "deprecation" ) - public void reset() - { - Futures.blockingGet( session.resetAsync(), () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while resetting the session" ) ); - } - - private T transaction( AccessMode mode, TransactionWork work, TransactionConfig config ) - { - // use different code path compared to 
async so that work is executed in the caller thread - // caller thread will also be the one who sleeps between retries; - // it is unsafe to execute retries in the event loop threads because this can cause a deadlock - // event loop thread will block and wait for itself to read some data - return session.retryLogic().retry( () -> { - try ( Transaction tx = beginTransaction( mode, config ) ) - { - try - { - T result = work.execute( tx ); - tx.success(); - return result; - } - catch ( Throwable t ) - { - // mark transaction for failure if the given unit of work threw exception - // this will override any success marks that were made by the unit of work - tx.failure(); - throw t; - } - } - } ); - } - - private Transaction beginTransaction( AccessMode mode, TransactionConfig config ) - { - ExplicitTransaction tx = Futures.blockingGet( session.beginTransactionAsync( mode, config ), - () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while starting a transaction" ) ); - return new InternalTransaction( tx ); - } - - private void terminateConnectionOnThreadInterrupt( String reason ) - { - // try to get current connection if it has been acquired - Connection connection = null; - try - { - connection = Futures.getNow( session.connectionAsync() ); - } - catch ( Throwable ignore ) - { - // ignore errors because handling interruptions is best effort - } - - if ( connection != null ) - { - connection.terminateAndRelease( reason ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalStatementResult.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalStatementResult.java deleted file mode 100644 index a836a074..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalStatementResult.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.neo4j.driver.internal; - -import java.util.List; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.concurrent.CompletionStage; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.Record; -import org.neo4j.driver.StatementResult; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.exceptions.NoSuchRecordException; -import org.neo4j.driver.summary.ResultSummary; -import java.util.function.Function; - -public class InternalStatementResult implements StatementResult -{ - private final Connection connection; - private final StatementResultCursor cursor; - private List keys; - - public InternalStatementResult( Connection connection, StatementResultCursor cursor ) - { - this.connection = connection; - this.cursor = cursor; - } - - @Override - public List keys() - { - if ( keys == null ) - { - blockingGet( cursor.peekAsync() ); - keys = cursor.keys(); - } - return keys; - } - - @Override - public boolean hasNext() - { - return blockingGet( cursor.peekAsync() ) != null; - } - - @Override - public Record next() - { - Record record = blockingGet( cursor.nextAsync() ); - if ( record == null ) - { - throw new NoSuchRecordException( "No more records" ); - } - return record; - } - - @Override - public Record single() - { - return blockingGet( cursor.singleAsync() ); - } - - @Override - public Record peek() - { - Record record = blockingGet( cursor.peekAsync() ); - if ( record == null ) - { - throw new NoSuchRecordException( "Cannot peek past the last record" ); - } - return record; - } - - @Override - public Stream stream() - { - Spliterator spliterator = Spliterators.spliteratorUnknownSize( this, Spliterator.IMMUTABLE | Spliterator.ORDERED ); - return StreamSupport.stream( spliterator, false ); - } - - @Override - public List list() - { - return blockingGet( cursor.listAsync() ); - } - - @Override - public List list( Function mapFunction ) - { - return blockingGet( cursor.listAsync( mapFunction ) ); - } - - @Override - public ResultSummary consume() - { - return blockingGet( cursor.consumeAsync() ); - } - - @Override - public ResultSummary summary() - { - return blockingGet( cursor.summaryAsync() ); - } - - @Override - public void remove() - { - throw new ClientException( "Removing records from a result is not supported." ); - } - - private T blockingGet( CompletionStage stage ) - { - return Futures.blockingGet( stage, this::terminateConnectionOnThreadInterrupt ); - } - - private void terminateConnectionOnThreadInterrupt() - { - connection.terminateAndRelease( "Thread interrupted while waiting for result to arrive" ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/InternalTransaction.java b/src/graiph-driver/java/org/neo4j/driver/internal/InternalTransaction.java deleted file mode 100644 index 60d26b69..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/InternalTransaction.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.StatementResult; -import org.neo4j.driver.Transaction; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.util.Futures; - -public class InternalTransaction extends AbstractStatementRunner implements Transaction -{ - private final ExplicitTransaction tx; - public InternalTransaction( ExplicitTransaction tx ) - { - this.tx = tx; - } - - @Override - public void success() - { - tx.success(); - } - - @Override - public void failure() - { - tx.failure(); - } - - @Override - public void close() - { - Futures.blockingGet( tx.closeAsync(), - () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while closing the transaction" ) ); - } - - @Override - public StatementResult run( Statement statement ) - { - StatementResultCursor cursor = Futures.blockingGet( tx.runAsync( statement, false ), - () -> terminateConnectionOnThreadInterrupt( "Thread interrupted while running query in transaction" ) ); - return new InternalStatementResult( tx.connection(), cursor ); - } - - @Override - public boolean isOpen() - { - return tx.isOpen(); - } - - private void terminateConnectionOnThreadInterrupt( String reason ) - { - tx.connection().terminateAndRelease( reason ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/RoutingErrorHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/RoutingErrorHandler.java deleted file mode 100644 index b59b42a8..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/RoutingErrorHandler.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -/** - * Interface used for tracking errors when connected to a cluster. - */ -public interface RoutingErrorHandler -{ - void onConnectionFailure( BoltServerAddress address ); - - void onWriteFailure( BoltServerAddress address ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/SessionFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/SessionFactory.java deleted file mode 100644 index 28639806..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/SessionFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.async.NetworkSession; - -public interface SessionFactory -{ - NetworkSession newInstance( SessionParameters parameters ); - - CompletionStage verifyConnectivity(); - - CompletionStage close(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/SessionFactoryImpl.java b/src/graiph-driver/java/org/neo4j/driver/internal/SessionFactoryImpl.java deleted file mode 100644 index 37ee2543..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/SessionFactoryImpl.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Config; -import org.neo4j.driver.Logging; -import org.neo4j.driver.internal.async.NetworkSession; -import org.neo4j.driver.internal.async.LeakLoggingNetworkSession; -import org.neo4j.driver.internal.retry.RetryLogic; -import org.neo4j.driver.internal.spi.ConnectionProvider; - -public class SessionFactoryImpl implements SessionFactory -{ - private final ConnectionProvider connectionProvider; - private final RetryLogic retryLogic; - private final Logging logging; - private final boolean leakedSessionsLoggingEnabled; - - SessionFactoryImpl( ConnectionProvider connectionProvider, RetryLogic retryLogic, Config config ) - { - this.connectionProvider = connectionProvider; - this.leakedSessionsLoggingEnabled = config.logLeakedSessions(); - this.retryLogic = retryLogic; - this.logging = config.logging(); - } - - @Override - public NetworkSession newInstance( SessionParameters parameters ) - { - BookmarksHolder bookmarksHolder = new DefaultBookmarksHolder( Bookmarks.from( parameters.bookmarks() ) ); - return createSession( connectionProvider, retryLogic, parameters.database(), parameters.defaultAccessMode(), bookmarksHolder, logging ); - } - - @Override - public CompletionStage verifyConnectivity() - { - return connectionProvider.verifyConnectivity(); - } - - @Override - public CompletionStage close() - { - return connectionProvider.close(); - } - - /** - * Get the underlying connection provider. - *
<p>
- * This method is only for testing - * - * @return the connection provider used by this factory. - */ - public ConnectionProvider getConnectionProvider() - { - return connectionProvider; - } - - private NetworkSession createSession( ConnectionProvider connectionProvider, RetryLogic retryLogic, String databaseName, AccessMode mode, - BookmarksHolder bookmarksHolder, Logging logging ) - { - return leakedSessionsLoggingEnabled - ? new LeakLoggingNetworkSession( connectionProvider, retryLogic, databaseName, mode, bookmarksHolder, logging ) - : new NetworkSession( connectionProvider, retryLogic, databaseName, mode, bookmarksHolder, logging ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/SessionParameters.java b/src/graiph-driver/java/org/neo4j/driver/internal/SessionParameters.java deleted file mode 100644 index 8a245a81..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/SessionParameters.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal; - -import java.util.Arrays; -import java.util.List; -import java.util.Objects; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.SessionParametersTemplate; - -import static org.neo4j.driver.internal.messaging.request.MultiDatabaseUtil.ABSENT_DB_NAME; - -/** - * The session parameters used to configure a session. - */ -public class SessionParameters -{ - private static final SessionParameters EMPTY = template().build(); - - private final List bookmarks; - private final AccessMode defaultAccessMode; - private final String database; - - /** - * Creates a session parameter template. - * @return a session parameter template. - */ - public static Template template() - { - return new Template(); - } - - /** - * Returns a static {@link SessionParameters} with default values for a general purpose session. - * @return a session parameter for a general purpose session. - */ - public static SessionParameters empty() - { - return EMPTY; - } - - private SessionParameters( Template template ) - { - this.bookmarks = template.bookmarks; - this.defaultAccessMode = template.defaultAccessMode; - this.database = template.database; - } - - /** - * Returns the initial bookmarks. - * First transaction in the session created with this {@link SessionParameters} - * will ensure that server hosting is at least as up-to-date as the - * latest transaction referenced by the supplied initial bookmarks. - * @return the initial bookmarks. - */ - public List bookmarks() - { - return bookmarks; - } - - /** - * The type of access required by units of work in this session, - * e.g. {@link AccessMode#READ read access} or {@link AccessMode#WRITE write access}. - * @return the access mode. - */ - public AccessMode defaultAccessMode() - { - return defaultAccessMode; - } - - /** - * The database where the session is going to connect to. 
- * @return the database name where the session is going to connect to. - */ - public String database() - { - return database; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - SessionParameters that = (SessionParameters) o; - return Objects.equals( bookmarks, that.bookmarks ) && defaultAccessMode == that.defaultAccessMode && database.equals( that.database ); - } - - @Override - public int hashCode() - { - return Objects.hash( bookmarks, defaultAccessMode, database ); - } - - @Override - public String toString() - { - return "SessionParameters{" + "bookmarks=" + bookmarks + ", defaultAccessMode=" + defaultAccessMode + ", database='" + database + '\'' + '}'; - } - - public static class Template implements SessionParametersTemplate - { - private List bookmarks = null; - private AccessMode defaultAccessMode = AccessMode.WRITE; - private String database = ABSENT_DB_NAME; - - private Template() - { - } - - @Override - public Template withBookmarks( String... bookmarks ) - { - if ( bookmarks == null ) - { - this.bookmarks = null; - } - else - { - this.bookmarks = Arrays.asList( bookmarks ); - } - return this; - } - - @Override - public Template withBookmarks( List bookmarks ) - { - this.bookmarks = bookmarks; - return this; - } - - @Override - public Template withDefaultAccessMode( AccessMode mode ) - { - this.defaultAccessMode = mode; - return this; - } - - @Override - public Template withDatabase( String database ) - { - Objects.requireNonNull( database, "Database cannot be null." ); - this.database = database; - return this; - } - - SessionParameters build() - { - return new SessionParameters( this ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/AsyncAbstractStatementRunner.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/AsyncAbstractStatementRunner.java deleted file mode 100644 index d2a7df9e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/AsyncAbstractStatementRunner.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async; - -import java.util.Map; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.async.AsyncStatementRunner; -import org.neo4j.driver.async.StatementResultCursor; - -import static org.neo4j.driver.internal.AbstractStatementRunner.parameters; - -public abstract class AsyncAbstractStatementRunner implements AsyncStatementRunner -{ - @Override - public final CompletionStage runAsync( String statementTemplate, Value parameters ) - { - return runAsync( new Statement( statementTemplate, parameters ) ); - } - - @Override - public final CompletionStage runAsync( String statementTemplate, Map statementParameters ) - { - return runAsync( statementTemplate, parameters( statementParameters ) ); - } - - @Override - public final CompletionStage runAsync( String statementTemplate, Record statementParameters ) - { - return runAsync( statementTemplate, parameters( statementParameters ) ); - } - - @Override - public final CompletionStage runAsync( String statementText ) - { - return runAsync( statementText, Values.EmptyMap ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/AsyncStatementResultCursor.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/AsyncStatementResultCursor.java deleted file mode 100644 index dd82c5f2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/AsyncStatementResultCursor.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async; - -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; -import java.util.function.Function; - -import org.neo4j.driver.internal.handlers.PullAllResponseHandler; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.internal.cursor.InternalStatementResultCursor; -import org.neo4j.driver.Record; -import org.neo4j.driver.exceptions.NoSuchRecordException; -import org.neo4j.driver.summary.ResultSummary; - -public class AsyncStatementResultCursor implements InternalStatementResultCursor -{ - private final RunResponseHandler runResponseHandler; - private final PullAllResponseHandler pullAllHandler; - - public AsyncStatementResultCursor( RunResponseHandler runResponseHandler, PullAllResponseHandler pullAllHandler ) - { - this.runResponseHandler = runResponseHandler; - this.pullAllHandler = pullAllHandler; - } - - @Override - public List keys() - { - return runResponseHandler.statementKeys(); - } - - @Override - public CompletionStage summaryAsync() - { - return pullAllHandler.summaryAsync(); - } - - @Override - public CompletionStage nextAsync() - { - return pullAllHandler.nextAsync(); - } - - @Override - public CompletionStage peekAsync() - { - return pullAllHandler.peekAsync(); - } - - @Override - public CompletionStage singleAsync() - { - return nextAsync().thenCompose( firstRecord -> - { - if ( firstRecord == null ) - { - throw new NoSuchRecordException( - "Cannot retrieve a single record, because this result is empty." ); - } - return nextAsync().thenApply( secondRecord -> - { - if ( secondRecord != null ) - { - throw new NoSuchRecordException( - "Expected a result with a single record, but this result " + - "contains at least one more. Ensure your query returns only " + - "one record." 
); - } - return firstRecord; - } ); - } ); - } - - @Override - public CompletionStage consumeAsync() - { - return pullAllHandler.consumeAsync(); - } - - @Override - public CompletionStage forEachAsync( Consumer action ) - { - CompletableFuture resultFuture = new CompletableFuture<>(); - internalForEachAsync( action, resultFuture ); - return resultFuture.thenCompose( ignore -> summaryAsync() ); - } - - @Override - public CompletionStage> listAsync() - { - return listAsync( Function.identity() ); - } - - @Override - public CompletionStage> listAsync( Function mapFunction ) - { - return pullAllHandler.listAsync( mapFunction ); - } - - @Override - public CompletionStage failureAsync() - { - return pullAllHandler.failureAsync(); - } - - private void internalForEachAsync( Consumer action, CompletableFuture resultFuture ) - { - CompletionStage recordFuture = nextAsync(); - - // use async completion listener because of recursion, otherwise it is possible for - // the caller thread to get StackOverflowError when result is large and buffered - recordFuture.whenCompleteAsync( ( record, completionError ) -> - { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - resultFuture.completeExceptionally( error ); - } - else if ( record != null ) - { - try - { - action.accept( record ); - } - catch ( Throwable actionError ) - { - resultFuture.completeExceptionally( actionError ); - return; - } - internalForEachAsync( action, resultFuture ); - } - else - { - resultFuture.complete( null ); - } - } ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/ExplicitTransaction.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/ExplicitTransaction.java deleted file mode 100644 index fc1e8482..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/ExplicitTransaction.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async; - -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.function.BiFunction; - -import org.neo4j.driver.Session; -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.cursor.InternalStatementResultCursor; -import org.neo4j.driver.internal.cursor.RxStatementResultCursor; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; - -import static org.neo4j.driver.internal.util.Futures.completedWithNull; -import static org.neo4j.driver.internal.util.Futures.failedFuture; - -public class ExplicitTransaction -{ - private enum State - { - /** The transaction is running with no explicit success or failure marked */ - ACTIVE, - - /** Running, user marked for success, meaning it'll value committed */ - MARKED_SUCCESS, - - /** User marked as failed, meaning it'll be rolled back. */ - MARKED_FAILED, - - /** - * This transaction has been terminated either because of explicit {@link Session#reset()} or because of a - * fatal connection error. - */ - TERMINATED, - - /** This transaction has successfully committed */ - COMMITTED, - - /** This transaction has been rolled back */ - ROLLED_BACK - } - - private final Connection connection; - private final BoltProtocol protocol; - private final BookmarksHolder bookmarksHolder; - private final ResultCursorsHolder resultCursors; - - private volatile State state = State.ACTIVE; - - public ExplicitTransaction( Connection connection, BookmarksHolder bookmarksHolder ) - { - this.connection = connection; - this.protocol = connection.protocol(); - this.bookmarksHolder = bookmarksHolder; - this.resultCursors = new ResultCursorsHolder(); - } - - public CompletionStage beginAsync( Bookmarks initialBookmarks, TransactionConfig config ) - { - return protocol.beginTransaction( connection, initialBookmarks, config ) - .handle( ( ignore, beginError ) -> - { - if ( beginError != null ) - { - // release connection if begin failed, transaction can't be started - connection.release(); - throw Futures.asCompletionException( beginError ); - } - return this; - } ); - } - - public void success() - { - if ( state == State.ACTIVE ) - { - state = State.MARKED_SUCCESS; - } - } - - public void failure() - { - if ( state == State.ACTIVE || state == State.MARKED_SUCCESS ) - { - state = State.MARKED_FAILED; - } - } - - public CompletionStage closeAsync() - { - if ( state == State.MARKED_SUCCESS ) - { - return commitAsync(); - } - else if ( state != State.COMMITTED && state != State.ROLLED_BACK ) - { - return rollbackAsync(); - } - else - { - return completedWithNull(); - } - } - - public CompletionStage commitAsync() - { - if ( state == State.COMMITTED ) - { - return completedWithNull(); - } - else if ( state == State.ROLLED_BACK ) - { - return failedFuture( new ClientException( "Can't commit, transaction has been rolled back" ) ); - } - else - { - return resultCursors.retrieveNotConsumedError() - .thenCompose( error -> doCommitAsync().handle( handleCommitOrRollback( error ) ) ) - .whenComplete( ( ignore, error ) -> transactionClosed( error == null ) ); - } - } - - public CompletionStage rollbackAsync() - { - if ( state == 
State.COMMITTED ) - { - return failedFuture( new ClientException( "Can't rollback, transaction has been committed" ) ); - } - else if ( state == State.ROLLED_BACK ) - { - return completedWithNull(); - } - else - { - return resultCursors.retrieveNotConsumedError() - .thenCompose( error -> doRollbackAsync().handle( handleCommitOrRollback( error ) ) ) - .whenComplete( ( ignore, error ) -> transactionClosed( false ) ); - } - } - - public CompletionStage runAsync( Statement statement, boolean waitForRunResponse ) - { - ensureCanRunQueries(); - CompletionStage cursorStage = - protocol.runInExplicitTransaction( connection, statement, this, waitForRunResponse ).asyncResult(); - resultCursors.add( cursorStage ); - return cursorStage.thenApply( cursor -> cursor ); - } - - public CompletionStage runRx( Statement statement ) - { - ensureCanRunQueries(); - CompletionStage cursorStage = - protocol.runInExplicitTransaction( connection, statement, this, false ).rxResult(); - resultCursors.add( cursorStage ); - return cursorStage; - } - - public boolean isOpen() - { - return state != State.COMMITTED && state != State.ROLLED_BACK; - } - - public void markTerminated() - { - state = State.TERMINATED; - } - - public Connection connection() - { - return connection; - } - - private void ensureCanRunQueries() - { - if ( state == State.COMMITTED ) - { - throw new ClientException( "Cannot run more statements in this transaction, it has been committed" ); - } - else if ( state == State.ROLLED_BACK ) - { - throw new ClientException( "Cannot run more statements in this transaction, it has been rolled back" ); - } - else if ( state == State.MARKED_FAILED ) - { - throw new ClientException( "Cannot run more statements in this transaction, it has been marked for failure. " + - "Please either rollback or close this transaction" ); - } - else if ( state == State.TERMINATED ) - { - throw new ClientException( "Cannot run more statements in this transaction, " + - "it has either experienced an fatal error or was explicitly terminated" ); - } - } - - private CompletionStage doCommitAsync() - { - if ( state == State.TERMINATED ) - { - return failedFuture( new ClientException( "Transaction can't be committed. 
" + - "It has been rolled back either because of an error or explicit termination" ) ); - } - return protocol.commitTransaction( connection ).thenAccept( bookmarksHolder::setBookmarks ); - } - - private CompletionStage doRollbackAsync() - { - if ( state == State.TERMINATED ) - { - return completedWithNull(); - } - return protocol.rollbackTransaction( connection ); - } - - private static BiFunction handleCommitOrRollback( Throwable cursorFailure ) - { - return ( ignore, commitOrRollbackError ) -> - { - CompletionException combinedError = Futures.combineErrors( cursorFailure, commitOrRollbackError ); - if ( combinedError != null ) - { - throw combinedError; - } - return null; - }; - } - - private void transactionClosed( boolean isCommitted ) - { - if ( isCommitted ) - { - state = State.COMMITTED; - } - else - { - state = State.ROLLED_BACK; - } - connection.release(); // release in background - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/InternalAsyncSession.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/InternalAsyncSession.java deleted file mode 100644 index 7dc5837f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/InternalAsyncSession.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.async.AsyncSession; -import org.neo4j.driver.async.AsyncTransaction; -import org.neo4j.driver.async.AsyncTransactionWork; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.internal.util.Futures; - -import static java.util.Collections.emptyMap; -import static org.neo4j.driver.internal.util.Futures.completedWithNull; -import static org.neo4j.driver.internal.util.Futures.failedFuture; - -public class InternalAsyncSession extends AsyncAbstractStatementRunner implements AsyncSession -{ - private final NetworkSession session; - - public InternalAsyncSession( NetworkSession session ) - { - this.session = session; - } - - @Override - public CompletionStage runAsync( Statement statement ) - { - return runAsync( statement, TransactionConfig.empty() ); - } - - @Override - public CompletionStage runAsync( String statement, TransactionConfig config ) - { - return runAsync( statement, emptyMap(), config ); - } - - @Override - public CompletionStage runAsync( String statement, Map parameters, TransactionConfig config ) - { - return runAsync( new Statement( statement, parameters ), config ); - } - - @Override - public CompletionStage runAsync( Statement statement, TransactionConfig config ) - { - return session.runAsync( statement, config, true ); - } - - @Override - public CompletionStage closeAsync() - { - return session.closeAsync(); - } - - @Override - public CompletionStage beginTransactionAsync() - { - return beginTransactionAsync( TransactionConfig.empty() ); - } - - @Override - public CompletionStage beginTransactionAsync( TransactionConfig config ) - { - return session.beginTransactionAsync( config ).thenApply( InternalAsyncTransaction::new ); - } - - @Override - public CompletionStage readTransactionAsync( AsyncTransactionWork> work ) - { - return readTransactionAsync( work, TransactionConfig.empty() ); - } - - @Override - public CompletionStage readTransactionAsync( AsyncTransactionWork> work, TransactionConfig config ) - { - return transactionAsync( AccessMode.READ, work, config ); - } - - @Override - public CompletionStage writeTransactionAsync( AsyncTransactionWork> work ) - { - return writeTransactionAsync( work, TransactionConfig.empty() ); - } - - @Override - public CompletionStage writeTransactionAsync( AsyncTransactionWork> work, TransactionConfig config ) - { - return transactionAsync( AccessMode.WRITE, work, config ); - } - - @Override - public String lastBookmark() - { - return session.lastBookmark(); - } - - private CompletionStage transactionAsync( AccessMode mode, AsyncTransactionWork> work, TransactionConfig config ) - { - return session.retryLogic().retryAsync( () -> { - CompletableFuture resultFuture = new CompletableFuture<>(); - CompletionStage txFuture = session.beginTransactionAsync( mode, config ); - - txFuture.whenComplete( ( tx, completionError ) -> { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - resultFuture.completeExceptionally( error ); - } - else - { - executeWork( resultFuture, tx, work ); - } - } ); - - return resultFuture; - } ); - } - - private void executeWork( CompletableFuture resultFuture, ExplicitTransaction tx, AsyncTransactionWork> work ) - { - CompletionStage 
workFuture = safeExecuteWork( tx, work ); - workFuture.whenComplete( ( result, completionError ) -> { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - rollbackTxAfterFailedTransactionWork( tx, resultFuture, error ); - } - else - { - closeTxAfterSucceededTransactionWork( tx, resultFuture, result ); - } - } ); - } - - private CompletionStage safeExecuteWork( ExplicitTransaction tx, AsyncTransactionWork> work ) - { - // given work might fail in both async and sync way - // async failure will result in a failed future being returned - // sync failure will result in an exception being thrown - try - { - CompletionStage result = work.execute( new InternalAsyncTransaction( tx ) ); - - // protect from given transaction function returning null - return result == null ? completedWithNull() : result; - } - catch ( Throwable workError ) - { - // work threw an exception, wrap it in a future and proceed - return failedFuture( workError ); - } - } - - private void rollbackTxAfterFailedTransactionWork( ExplicitTransaction tx, CompletableFuture resultFuture, Throwable error ) - { - if ( tx.isOpen() ) - { - tx.rollbackAsync().whenComplete( ( ignore, rollbackError ) -> { - if ( rollbackError != null ) - { - error.addSuppressed( rollbackError ); - } - resultFuture.completeExceptionally( error ); - } ); - } - else - { - resultFuture.completeExceptionally( error ); - } - } - - private void closeTxAfterSucceededTransactionWork( ExplicitTransaction tx, CompletableFuture resultFuture, T result ) - { - if ( tx.isOpen() ) - { - tx.success(); - tx.closeAsync().whenComplete( ( ignore, completionError ) -> { - Throwable commitError = Futures.completionExceptionCause( completionError ); - if ( commitError != null ) - { - resultFuture.completeExceptionally( commitError ); - } - else - { - resultFuture.complete( result ); - } - } ); - } - else - { - resultFuture.complete( result ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/InternalAsyncTransaction.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/InternalAsyncTransaction.java deleted file mode 100644 index 75582f6b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/InternalAsyncTransaction.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.async.AsyncTransaction; -import org.neo4j.driver.async.StatementResultCursor; - -public class InternalAsyncTransaction extends AsyncAbstractStatementRunner implements AsyncTransaction -{ - private final ExplicitTransaction tx; - public InternalAsyncTransaction( ExplicitTransaction tx ) - { - this.tx = tx; - } - - @Override - public CompletionStage commitAsync() - { - return tx.commitAsync(); - } - - @Override - public CompletionStage rollbackAsync() - { - return tx.rollbackAsync(); - } - - @Override - public CompletionStage runAsync( Statement statement ) - { - return tx.runAsync( statement, true ); - } - - public void markTerminated() - { - tx.markTerminated(); - } - - public boolean isOpen() - { - return tx.isOpen(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/LeakLoggingNetworkSession.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/LeakLoggingNetworkSession.java deleted file mode 100644 index c50f0130..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/LeakLoggingNetworkSession.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Logging; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.retry.RetryLogic; -import org.neo4j.driver.internal.spi.ConnectionProvider; -import org.neo4j.driver.internal.util.Futures; - -import static java.lang.System.lineSeparator; - -public class LeakLoggingNetworkSession extends NetworkSession -{ - private final String stackTrace; - - public LeakLoggingNetworkSession( ConnectionProvider connectionProvider, RetryLogic retryLogic, String databaseName, AccessMode mode, - BookmarksHolder bookmarksHolder, Logging logging ) - { - super( connectionProvider, retryLogic, databaseName, mode, bookmarksHolder, logging ); - this.stackTrace = captureStackTrace(); - } - - @Override - protected void finalize() throws Throwable - { - logLeakIfNeeded(); - super.finalize(); - } - - private void logLeakIfNeeded() - { - Boolean isOpen = Futures.blockingGet( currentConnectionIsOpen() ); - if ( isOpen ) - { - logger.error( "Neo4j Session object leaked, please ensure that your application " + - "calls the `close` method on Sessions before disposing of the objects.\n" + - "Session was created at:\n" + stackTrace, null ); - } - } - private static String captureStackTrace() - { - StringBuilder result = new StringBuilder(); - StackTraceElement[] elements = Thread.currentThread().getStackTrace(); - for ( StackTraceElement element : elements ) - { - result.append( "\t" ).append( element ).append( lineSeparator() ); - } - return result.toString(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/NetworkSession.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/NetworkSession.java deleted file mode 100644 index 09c21d99..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/NetworkSession.java +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.neo4j.driver.internal.async; - -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.FailableCursor; -import org.neo4j.driver.internal.cursor.InternalStatementResultCursor; -import org.neo4j.driver.internal.cursor.RxStatementResultCursor; -import org.neo4j.driver.internal.cursor.StatementResultCursorFactory; -import org.neo4j.driver.internal.logging.PrefixedLogger; -import org.neo4j.driver.internal.retry.RetryLogic; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ConnectionProvider; -import org.neo4j.driver.internal.util.Futures; - -import static java.util.concurrent.CompletableFuture.completedFuture; -import static org.neo4j.driver.internal.util.Futures.completedWithNull; - -public class NetworkSession -{ - private static final String LOG_NAME = "Session"; - - private final ConnectionProvider connectionProvider; - private final AccessMode mode; - private final String databaseName; - private final RetryLogic retryLogic; - protected final Logger logger; - - private final BookmarksHolder bookmarksHolder; - private volatile CompletionStage transactionStage = completedWithNull(); - private volatile CompletionStage connectionStage = completedWithNull(); - private volatile CompletionStage resultCursorStage = completedWithNull(); - - private final AtomicBoolean open = new AtomicBoolean( true ); - - public NetworkSession( ConnectionProvider connectionProvider, RetryLogic retryLogic, String databaseName, AccessMode mode, - BookmarksHolder bookmarksHolder, Logging logging ) - { - this.connectionProvider = connectionProvider; - this.mode = mode; - this.retryLogic = retryLogic; - this.logger = new PrefixedLogger( "[" + hashCode() + "]", logging.getLog( LOG_NAME ) ); - this.bookmarksHolder = bookmarksHolder; - this.databaseName = databaseName; - } - - public CompletionStage runAsync( Statement statement, TransactionConfig config, boolean waitForRunResponse ) - { - CompletionStage newResultCursorStage = - buildResultCursorFactory( statement, config, waitForRunResponse ).thenCompose( StatementResultCursorFactory::asyncResult ); - - resultCursorStage = newResultCursorStage.exceptionally( error -> null ); - return newResultCursorStage.thenApply( cursor -> cursor ); // convert the return type - } - - public CompletionStage runRx( Statement statement, TransactionConfig config ) - { - CompletionStage newResultCursorStage = - buildResultCursorFactory( statement, config, true ).thenCompose( StatementResultCursorFactory::rxResult ); - - resultCursorStage = newResultCursorStage.exceptionally( error -> null ); - return newResultCursorStage; - } - - public CompletionStage beginTransactionAsync( TransactionConfig config ) - { - return this.beginTransactionAsync( mode, config ); - } - - public CompletionStage beginTransactionAsync( AccessMode mode, TransactionConfig config ) - { - ensureSessionIsOpen(); - - // create a chain that acquires connection and starts a transaction - CompletionStage newTransactionStage = ensureNoOpenTxBeforeStartingTx() - .thenCompose( ignore -> acquireConnection( 
databaseName, mode ) ) - .thenCompose( connection -> - { - ExplicitTransaction tx = new ExplicitTransaction( connection, bookmarksHolder ); - return tx.beginAsync( bookmarksHolder.getBookmarks(), config ); - } ); - - // update the reference to the only known transaction - CompletionStage currentTransactionStage = transactionStage; - - transactionStage = newTransactionStage - .exceptionally( error -> null ) // ignore errors from starting new transaction - .thenCompose( tx -> - { - if ( tx == null ) - { - // failed to begin new transaction, keep reference to the existing one - return currentTransactionStage; - } - // new transaction started, keep reference to it - return completedFuture( tx ); - } ); - - return newTransactionStage; - } - - public CompletionStage resetAsync() - { - return existingTransactionOrNull() - .thenAccept( tx -> - { - if ( tx != null ) - { - tx.markTerminated(); - } - } ) - .thenCompose( ignore -> connectionStage ) - .thenCompose( connection -> - { - if ( connection != null ) - { - // there exists an active connection, send a RESET message over it - return connection.reset(); - } - return completedWithNull(); - } ); - } - - public RetryLogic retryLogic() - { - return retryLogic; - } - - public String lastBookmark() - { - return bookmarksHolder.lastBookmark(); - } - - public CompletionStage releaseConnectionAsync() - { - return connectionStage.thenCompose( connection -> - { - if ( connection != null ) - { - // there exists connection, try to release it back to the pool - return connection.release(); - } - // no connection so return null - return completedWithNull(); - } ); - } - - public CompletionStage connectionAsync() - { - return connectionStage; - } - - public boolean isOpen() - { - return open.get(); - } - - public CompletionStage closeAsync() - { - if ( open.compareAndSet( true, false ) ) - { - return resultCursorStage.thenCompose( cursor -> - { - if ( cursor != null ) - { - // there exists a cursor with potentially unconsumed error, try to extract and propagate it - return cursor.failureAsync(); - } - // no result cursor exists so no error exists - return completedWithNull(); - } ).thenCompose( cursorError -> closeTransactionAndReleaseConnection().thenApply( txCloseError -> - { - // now we have cursor error, active transaction has been closed and connection has been released - // back to the pool; try to propagate cursor and transaction close errors, if any - CompletionException combinedError = Futures.combineErrors( cursorError, txCloseError ); - if ( combinedError != null ) - { - throw combinedError; - } - return null; - } ) ); - } - return completedWithNull(); - } - - protected CompletionStage currentConnectionIsOpen() - { - return connectionStage.handle( ( connection, error ) -> - error == null && // no acquisition error - connection != null && // some connection has actually been acquired - connection.isOpen() ); // and it's still open - } - - private CompletionStage buildResultCursorFactory( Statement statement, TransactionConfig config, boolean waitForRunResponse ) - { - ensureSessionIsOpen(); - - return ensureNoOpenTxBeforeRunningQuery() - .thenCompose( ignore -> acquireConnection( databaseName, mode ) ) - .thenCompose( connection -> { - try - { - StatementResultCursorFactory factory = connection.protocol() - .runInAutoCommitTransaction( connection, statement, bookmarksHolder, config, waitForRunResponse ); - return completedFuture( factory ); - } - catch ( Throwable e ) - { - return Futures.failedFuture( e ); - } - } ); - } - - private CompletionStage 
acquireConnection( String databaseName, AccessMode mode ) - { - CompletionStage currentConnectionStage = connectionStage; - - CompletionStage newConnectionStage = resultCursorStage.thenCompose( cursor -> - { - if ( cursor == null ) - { - return completedWithNull(); - } - // make sure previous result is fully consumed and connection is released back to the pool - return cursor.failureAsync(); - } ).thenCompose( error -> - { - if ( error == null ) - { - // there is no unconsumed error, so one of the following is true: - // 1) this is first time connection is acquired in this session - // 2) previous result has been successful and is fully consumed - // 3) previous result failed and error has been consumed - - // return existing connection, which should've been released back to the pool by now - return currentConnectionStage.exceptionally( ignore -> null ); - } - else - { - // there exists unconsumed error, re-throw it - throw new CompletionException( error ); - } - } ).thenCompose( existingConnection -> - { - if ( existingConnection != null && existingConnection.isOpen() ) - { - // there somehow is an existing open connection, this should not happen, just a precondition - throw new IllegalStateException( "Existing open connection detected" ); - } - return connectionProvider.acquireConnection( databaseName, mode ); - } ); - - connectionStage = newConnectionStage.exceptionally( error -> null ); - - return newConnectionStage; - } - - private CompletionStage closeTransactionAndReleaseConnection() - { - return existingTransactionOrNull().thenCompose( tx -> - { - if ( tx != null ) - { - // there exists an open transaction, let's close it and propagate the error, if any - return tx.closeAsync() - .thenApply( ignore -> (Throwable) null ) - .exceptionally( error -> error ); - } - // no open transaction so nothing to close - return completedWithNull(); - } ).thenCompose( txCloseError -> - // then release the connection and propagate transaction close error, if any - releaseConnectionAsync().thenApply( ignore -> txCloseError ) ); - } - - private CompletionStage ensureNoOpenTxBeforeRunningQuery() - { - return ensureNoOpenTx( "Statements cannot be run directly on a session with an open transaction; " + - "either run from within the transaction or use a different session." ); - } - - private CompletionStage ensureNoOpenTxBeforeStartingTx() - { - return ensureNoOpenTx( "You cannot begin a transaction on a session with an open transaction; " + - "either run from within the transaction or use a different session." ); - } - - private CompletionStage ensureNoOpenTx( String errorMessage ) - { - return existingTransactionOrNull().thenAccept( tx -> - { - if ( tx != null ) - { - throw new ClientException( errorMessage ); - } - } ); - } - - private CompletionStage existingTransactionOrNull() - { - return transactionStage - .exceptionally( error -> null ) // handle previous connection acquisition and tx begin failures - .thenApply( tx -> tx != null && tx.isOpen() ? tx : null ); - } - - private void ensureSessionIsOpen() - { - if ( !open.get() ) - { - throw new ClientException( - "No more interactions with this session are allowed as the current session is already closed. 
" ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/ResultCursorsHolder.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/ResultCursorsHolder.java deleted file mode 100644 index 319f576f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/ResultCursorsHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.FailableCursor; - -import static org.neo4j.driver.internal.util.Futures.completedWithNull; - -public class ResultCursorsHolder -{ - private final List> cursorStages = new ArrayList<>(); - - public void add( CompletionStage cursorStage ) - { - Objects.requireNonNull( cursorStage ); - cursorStages.add( cursorStage ); - } - - CompletionStage retrieveNotConsumedError() - { - CompletableFuture[] failures = retrieveAllFailures(); - - return CompletableFuture.allOf( failures ) - .thenApply( ignore -> findFirstFailure( failures ) ); - } - - @SuppressWarnings( "unchecked" ) - private CompletableFuture[] retrieveAllFailures() - { - return cursorStages.stream() - .map( ResultCursorsHolder::retrieveFailure ) - .map( CompletionStage::toCompletableFuture ) - .toArray( CompletableFuture[]::new ); - } - - private static Throwable findFirstFailure( CompletableFuture[] completedFailureFutures ) - { - // all given futures should be completed, it is thus safe to get their values - - for ( CompletableFuture failureFuture : completedFailureFutures ) - { - Throwable failure = failureFuture.getNow( null ); // does not block - if ( failure != null ) - { - return failure; - } - } - return null; - } - - private static CompletionStage retrieveFailure( CompletionStage cursorStage ) - { - return cursorStage - .exceptionally( cursor -> null ) - .thenCompose( cursor -> cursor == null ? completedWithNull() : cursor.failureAsync() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/BoltProtocolUtil.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/BoltProtocolUtil.java deleted file mode 100644 index 8d43ceb6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/BoltProtocolUtil.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.buffer.ByteBuf; - -import org.neo4j.driver.internal.messaging.v1.BoltProtocolV1; -import org.neo4j.driver.internal.messaging.v2.BoltProtocolV2; -import org.neo4j.driver.internal.messaging.v3.BoltProtocolV3; -import org.neo4j.driver.internal.messaging.v4.BoltProtocolV4; -import org.neo4j.driver.internal.messaging.v5.BoltProtocolV5; - -import static io.netty.buffer.Unpooled.copyInt; -import static io.netty.buffer.Unpooled.unreleasableBuffer; -import static java.lang.Integer.toHexString; - -public final class BoltProtocolUtil -{ - public static final int HTTP = 1213486160; //== 0x48545450 == "HTTP" - - public static final int BOLT_MAGIC_PREAMBLE = 0x6060B017; - public static final int NO_PROTOCOL_VERSION = 0; - - public static final int CHUNK_HEADER_SIZE_BYTES = 2; - - public static final int DEFAULT_MAX_OUTBOUND_CHUNK_SIZE_BYTES = Short.MAX_VALUE / 2; - - private static final ByteBuf HANDSHAKE_BUF = unreleasableBuffer( copyInt( - BOLT_MAGIC_PREAMBLE, - BoltProtocolV5.VERSION, - BoltProtocolV4.VERSION, - BoltProtocolV3.VERSION, - BoltProtocolV2.VERSION - //,BoltProtocolV1.VERSION - ) ).asReadOnly(); - - private static final String HANDSHAKE_STRING = createHandshakeString(); - - private BoltProtocolUtil() - { - } - - public static ByteBuf handshakeBuf() - { - return HANDSHAKE_BUF.duplicate(); - } - - public static String handshakeString() - { - return HANDSHAKE_STRING; - } - - public static void writeMessageBoundary( ByteBuf buf ) - { - buf.writeShort( 0 ); - } - - public static void writeEmptyChunkHeader( ByteBuf buf ) - { - buf.writeShort( 0 ); - } - - public static void writeChunkHeader( ByteBuf buf, int chunkStartIndex, int headerValue ) - { - buf.setShort( chunkStartIndex, headerValue ); - } - - private static String createHandshakeString() - { - ByteBuf buf = handshakeBuf(); - return String.format( "[0x%s, %s, %s, %s, %s]", toHexString( buf.readInt() ), buf.readInt(), buf.readInt(), buf.readInt(), buf.readInt() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/BootstrapFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/BootstrapFactory.java deleted file mode 100644 index 3262fba3..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/BootstrapFactory.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.ChannelOption; -import io.netty.channel.EventLoopGroup; - -public final class BootstrapFactory -{ - private BootstrapFactory() - { - } - - public static Bootstrap newBootstrap() - { - return newBootstrap( EventLoopGroupFactory.newEventLoopGroup() ); - } - - public static Bootstrap newBootstrap( int threadCount ) - { - return newBootstrap( EventLoopGroupFactory.newEventLoopGroup( threadCount ) ); - } - - private static Bootstrap newBootstrap( EventLoopGroup eventLoopGroup ) - { - Bootstrap bootstrap = new Bootstrap(); - bootstrap.group( eventLoopGroup ); - bootstrap.channel( EventLoopGroupFactory.channelClass() ); - bootstrap.option( ChannelOption.SO_KEEPALIVE, true ); - bootstrap.option( ChannelOption.SO_REUSEADDR, true ); - return bootstrap; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelAttributes.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelAttributes.java deleted file mode 100644 index 784f82b6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelAttributes.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.Channel; -import io.netty.util.AttributeKey; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.inbound.InboundMessageDispatcher; -import org.neo4j.driver.internal.util.ServerVersion; - -import static io.netty.util.AttributeKey.newInstance; - -public final class ChannelAttributes -{ - private static final AttributeKey CONNECTION_ID = newInstance( "connectionId" ); - private static final AttributeKey PROTOCOL_VERSION = newInstance( "protocolVersion" ); - private static final AttributeKey ADDRESS = newInstance( "serverAddress" ); - private static final AttributeKey SERVER_VERSION = newInstance( "serverVersion" ); - private static final AttributeKey CREATION_TIMESTAMP = newInstance( "creationTimestamp" ); - private static final AttributeKey LAST_USED_TIMESTAMP = newInstance( "lastUsedTimestamp" ); - private static final AttributeKey MESSAGE_DISPATCHER = newInstance( "messageDispatcher" ); - private static final AttributeKey TERMINATION_REASON = newInstance( "terminationReason" ); - - private ChannelAttributes() - { - } - - public static String connectionId( Channel channel ) - { - return get( channel, CONNECTION_ID ); - } - - public static void setConnectionId( Channel channel, String id ) - { - setOnce( channel, CONNECTION_ID, id ); - } - - public static int protocolVersion( Channel channel ) - { - return get( channel, PROTOCOL_VERSION ); - } - - public static void setProtocolVersion( Channel channel, int version ) - { - setOnce( channel, PROTOCOL_VERSION, version ); - } - - public static BoltServerAddress serverAddress( Channel channel ) - { - return get( channel, ADDRESS ); - } - - public static void setServerAddress( Channel channel, BoltServerAddress address ) - { - setOnce( channel, ADDRESS, address ); - } - - public static ServerVersion serverVersion( Channel channel ) - { - return get( channel, SERVER_VERSION ); - } - - public static void setServerVersion( Channel channel, ServerVersion version ) - { - setOnce( channel, SERVER_VERSION, version ); - } - - public static long creationTimestamp( Channel channel ) - { - return get( channel, CREATION_TIMESTAMP ); - } - - public static void setCreationTimestamp( Channel channel, long creationTimestamp ) - { - setOnce( channel, CREATION_TIMESTAMP, creationTimestamp ); - } - - public static Long lastUsedTimestamp( Channel channel ) - { - return get( channel, LAST_USED_TIMESTAMP ); - } - - public static void setLastUsedTimestamp( Channel channel, long lastUsedTimestamp ) - { - set( channel, LAST_USED_TIMESTAMP, lastUsedTimestamp ); - } - - public static InboundMessageDispatcher messageDispatcher( Channel channel ) - { - return get( channel, MESSAGE_DISPATCHER ); - } - - public static void setMessageDispatcher( Channel channel, InboundMessageDispatcher messageDispatcher ) - { - setOnce( channel, MESSAGE_DISPATCHER, messageDispatcher ); - } - - public static String terminationReason( Channel channel ) - { - return get( channel, TERMINATION_REASON ); - } - - public static void setTerminationReason( Channel channel, String reason ) - { - setOnce( channel, TERMINATION_REASON, reason ); - } - - private static T get( Channel channel, AttributeKey key ) - { - return channel.attr( key ).get(); - } - - private static void set( Channel channel, AttributeKey key, T value ) - { - channel.attr( key ).set( value ); - } - - private static void setOnce( Channel channel, AttributeKey key, T value ) - { - T existingValue = channel.attr( 
key ).setIfAbsent( value ); - if ( existingValue != null ) - { - throw new IllegalStateException( - "Unable to set " + key.name() + " because it is already set to " + existingValue ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnectedListener.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnectedListener.java deleted file mode 100644 index 3a118044..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnectedListener.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelPromise; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.logging.ChannelActivityLogger; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ServiceUnavailableException; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.async.connection.BoltProtocolUtil.handshakeBuf; -import static org.neo4j.driver.internal.async.connection.BoltProtocolUtil.handshakeString; - -public class ChannelConnectedListener implements ChannelFutureListener -{ - private final BoltServerAddress address; - private final ChannelPipelineBuilder pipelineBuilder; - private final ChannelPromise handshakeCompletedPromise; - private final Logging logging; - - public ChannelConnectedListener( BoltServerAddress address, ChannelPipelineBuilder pipelineBuilder, - ChannelPromise handshakeCompletedPromise, Logging logging ) - { - this.address = address; - this.pipelineBuilder = pipelineBuilder; - this.handshakeCompletedPromise = handshakeCompletedPromise; - this.logging = logging; - } - - @Override - public void operationComplete( ChannelFuture future ) - { - Channel channel = future.channel(); - Logger log = new ChannelActivityLogger( channel, logging, getClass() ); - - if ( future.isSuccess() ) - { - log.trace( "Channel %s connected, initiating bolt handshake", channel ); - - ChannelPipeline pipeline = channel.pipeline(); - pipeline.addLast( new HandshakeHandler( pipelineBuilder, handshakeCompletedPromise, logging ) ); - log.debug( "C: [Bolt Handshake] %s", handshakeString() ); - channel.writeAndFlush( handshakeBuf(), channel.voidPromise() ); - } - else - { - handshakeCompletedPromise.setFailure( databaseUnavailableError( address, future.cause() ) ); - } - } - - private static Throwable databaseUnavailableError( BoltServerAddress address, Throwable cause ) - { - return new ServiceUnavailableException( format( - "Unable to connect to %s, ensure the database is running and that there " + - "is a working network 
connection to it.", address ), cause ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnector.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnector.java deleted file mode 100644 index 6ebf0b42..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnector.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.ChannelFuture; - -import org.neo4j.driver.internal.BoltServerAddress; - -public interface ChannelConnector -{ - ChannelFuture connect( BoltServerAddress address, Bootstrap bootstrap ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnectorImpl.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnectorImpl.java deleted file mode 100644 index 76bde81a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelConnectorImpl.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelPromise; - -import java.util.Map; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.ConnectionSettings; -import org.neo4j.driver.internal.async.inbound.ConnectTimeoutHandler; -import org.neo4j.driver.internal.security.InternalAuthToken; -import org.neo4j.driver.internal.security.SecurityPlan; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.AuthToken; -import org.neo4j.driver.AuthTokens; -import org.neo4j.driver.Logging; -import org.neo4j.driver.Value; -import org.neo4j.driver.exceptions.ClientException; - -import static java.util.Objects.requireNonNull; - -public class ChannelConnectorImpl implements ChannelConnector -{ - private final String userAgent; - private final Map authToken; - private final SecurityPlan securityPlan; - private final ChannelPipelineBuilder pipelineBuilder; - private final int connectTimeoutMillis; - private final Logging logging; - private final Clock clock; - - public ChannelConnectorImpl( ConnectionSettings connectionSettings, SecurityPlan securityPlan, Logging logging, - Clock clock ) - { - this( connectionSettings, securityPlan, new ChannelPipelineBuilderImpl(), logging, clock ); - } - - public ChannelConnectorImpl( ConnectionSettings connectionSettings, SecurityPlan securityPlan, - ChannelPipelineBuilder pipelineBuilder, Logging logging, Clock clock ) - { - this.userAgent = connectionSettings.userAgent(); - this.authToken = tokenAsMap( connectionSettings.authToken() ); - this.connectTimeoutMillis = connectionSettings.connectTimeoutMillis(); - this.securityPlan = requireNonNull( securityPlan ); - this.pipelineBuilder = pipelineBuilder; - this.logging = requireNonNull( logging ); - this.clock = requireNonNull( clock ); - } - - @Override - public ChannelFuture connect( BoltServerAddress address, Bootstrap bootstrap ) - { - bootstrap.option( ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMillis ); - bootstrap.handler( new NettyChannelInitializer( address, securityPlan, connectTimeoutMillis, clock, logging ) ); - - ChannelFuture channelConnected = bootstrap.connect( address.toSocketAddress() ); - - Channel channel = channelConnected.channel(); - ChannelPromise handshakeCompleted = channel.newPromise(); - ChannelPromise connectionInitialized = channel.newPromise(); - - installChannelConnectedListeners( address, channelConnected, handshakeCompleted ); - installHandshakeCompletedListeners( handshakeCompleted, connectionInitialized ); - - return connectionInitialized; - } - - private void installChannelConnectedListeners( BoltServerAddress address, ChannelFuture channelConnected, - ChannelPromise handshakeCompleted ) - { - ChannelPipeline pipeline = channelConnected.channel().pipeline(); - - // add timeout handler to the pipeline when channel is connected. it's needed to limit amount of time code - // spends in TLS and Bolt handshakes. 
prevents infinite waiting when database does not respond - channelConnected.addListener( future -> - pipeline.addFirst( new ConnectTimeoutHandler( connectTimeoutMillis ) ) ); - - // add listener that sends Bolt handshake bytes when channel is connected - channelConnected.addListener( - new ChannelConnectedListener( address, pipelineBuilder, handshakeCompleted, logging ) ); - } - - private void installHandshakeCompletedListeners( ChannelPromise handshakeCompleted, - ChannelPromise connectionInitialized ) - { - ChannelPipeline pipeline = handshakeCompleted.channel().pipeline(); - - // remove timeout handler from the pipeline once TLS and Bolt handshakes are completed. regular protocol - // messages will flow next and we do not want to have read timeout for them - handshakeCompleted.addListener( future -> pipeline.remove( ConnectTimeoutHandler.class ) ); - - // add listener that sends an INIT message. connection is now fully established. channel pipeline if fully - // set to send/receive messages for a selected protocol version - handshakeCompleted.addListener( new HandshakeCompletedListener( userAgent, authToken, connectionInitialized ) ); - } - - private static Map tokenAsMap( AuthToken token ) - { - if ( token instanceof InternalAuthToken ) - { - return ((InternalAuthToken) token).toMap(); - } - else - { - throw new ClientException( - "Unknown authentication token, `" + token + "`. Please use one of the supported " + - "tokens from `" + AuthTokens.class.getSimpleName() + "`." ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelPipelineBuilder.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelPipelineBuilder.java deleted file mode 100644 index 9c7c2d97..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelPipelineBuilder.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.ChannelPipeline; - -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.Logging; - -public interface ChannelPipelineBuilder -{ - void build( MessageFormat messageFormat, ChannelPipeline pipeline, Logging logging ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelPipelineBuilderImpl.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelPipelineBuilderImpl.java deleted file mode 100644 index 98535fdb..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/ChannelPipelineBuilderImpl.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.ChannelPipeline; - -import org.neo4j.driver.internal.async.inbound.ChannelErrorHandler; -import org.neo4j.driver.internal.async.inbound.ChunkDecoder; -import org.neo4j.driver.internal.async.inbound.InboundMessageHandler; -import org.neo4j.driver.internal.async.inbound.MessageDecoder; -import org.neo4j.driver.internal.async.outbound.OutboundMessageHandler; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.Logging; - -public class ChannelPipelineBuilderImpl implements ChannelPipelineBuilder -{ - @Override - public void build( MessageFormat messageFormat, ChannelPipeline pipeline, Logging logging ) - { - // inbound handlers - pipeline.addLast( new ChunkDecoder( logging ) ); - pipeline.addLast( new MessageDecoder() ); - pipeline.addLast( new InboundMessageHandler( messageFormat, logging ) ); - - // outbound handlers - pipeline.addLast( OutboundMessageHandler.NAME, new OutboundMessageHandler( messageFormat, logging ) ); - - // last one - error handler - pipeline.addLast( new ChannelErrorHandler( logging ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/DecoratedConnection.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/DecoratedConnection.java deleted file mode 100644 index 29cacaed..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/DecoratedConnection.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.connection; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.ServerVersion; -import org.neo4j.driver.AccessMode; - -/** - * This is a connection with extra parameters such as database name and access mode - */ -public class DecoratedConnection implements Connection -{ - private final Connection delegate; - private final AccessMode mode; - private final String databaseName; - - public DecoratedConnection( Connection delegate, String databaseName, AccessMode mode ) - { - this.delegate = delegate; - this.mode = mode; - this.databaseName = databaseName; - } - - public Connection connection() - { - return delegate; - } - - @Override - public boolean isOpen() - { - return delegate.isOpen(); - } - - @Override - public void enableAutoRead() - { - delegate.enableAutoRead(); - } - - @Override - public void disableAutoRead() - { - delegate.disableAutoRead(); - } - - @Override - public void write( Message message, ResponseHandler handler ) - { - delegate.write( message, handler ); - } - - @Override - public void write( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ) - { - delegate.write( message1, handler1, message2, handler2 ); - } - - @Override - public void writeAndFlush( Message message, ResponseHandler handler ) - { - delegate.writeAndFlush( message, handler ); - } - - @Override - public void writeAndFlush( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ) - { - delegate.writeAndFlush( message1, handler1, message2, handler2 ); - } - - @Override - public CompletionStage reset() - { - return delegate.reset(); - } - - @Override - public CompletionStage release() - { - return delegate.release(); - } - - @Override - public void terminateAndRelease( String reason ) - { - delegate.terminateAndRelease( reason ); - } - - @Override - public BoltServerAddress serverAddress() - { - return delegate.serverAddress(); - } - - @Override - public ServerVersion serverVersion() - { - return delegate.serverVersion(); - } - - @Override - public BoltProtocol protocol() - { - return delegate.protocol(); - } - - @Override - public AccessMode mode() - { - return mode; - } - - @Override - public String databaseName() - { - return this.databaseName; - } - - @Override - public void flush() - { - delegate.flush(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/DirectConnection.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/DirectConnection.java deleted file mode 100644 index 70820796..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/DirectConnection.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.Channel; -import io.netty.channel.pool.ChannelPool; - -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicReference; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.inbound.InboundMessageDispatcher; -import org.neo4j.driver.internal.handlers.ChannelReleasingResetResponseHandler; -import org.neo4j.driver.internal.handlers.ResetResponseHandler; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.request.ResetMessage; -import org.neo4j.driver.internal.metrics.ListenerEvent; -import org.neo4j.driver.internal.metrics.MetricsListener; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.internal.util.ServerVersion; - -import static java.util.Collections.emptyMap; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setTerminationReason; - -public class DirectConnection implements Connection -{ - private final Channel channel; - private final InboundMessageDispatcher messageDispatcher; - private final BoltServerAddress serverAddress; - private final ServerVersion serverVersion; - private final BoltProtocol protocol; - private final ChannelPool channelPool; - private final CompletableFuture releaseFuture; - private final Clock clock; - - private final AtomicReference status = new AtomicReference<>( Status.OPEN ); - private final MetricsListener metricsListener; - private final ListenerEvent inUseEvent; - - public DirectConnection( Channel channel, ChannelPool channelPool, Clock clock, MetricsListener metricsListener ) - { - this.channel = channel; - this.messageDispatcher = ChannelAttributes.messageDispatcher( channel ); - this.serverAddress = ChannelAttributes.serverAddress( channel ); - this.serverVersion = ChannelAttributes.serverVersion( channel ); - this.protocol = BoltProtocol.forChannel( channel ); - this.channelPool = channelPool; - this.releaseFuture = new CompletableFuture<>(); - this.clock = clock; - this.metricsListener = metricsListener; - this.inUseEvent = metricsListener.createListenerEvent(); - metricsListener.afterConnectionCreated( this.serverAddress, this.inUseEvent ); - } - - @Override - public boolean isOpen() - { - return status.get() == Status.OPEN; - } - - @Override - public void enableAutoRead() - { - if ( isOpen() ) - { - setAutoRead( true ); - } - } - - @Override - public void disableAutoRead() - { - if ( isOpen() ) - { - setAutoRead( false ); - } - } - - @Override - public void flush() - { - if ( verifyOpen( null, null ) ) - { - flushInEventLoop(); - } - } - - @Override - public void write( Message message, ResponseHandler handler ) - { - if ( verifyOpen( handler, null ) ) - { - writeMessageInEventLoop( message, handler, false ); - } - } - - @Override - public void write( Message message1, 
ResponseHandler handler1, Message message2, ResponseHandler handler2 ) - { - if ( verifyOpen( handler1, handler2 ) ) - { - writeMessagesInEventLoop( message1, handler1, message2, handler2, false ); - } - } - - @Override - public void writeAndFlush( Message message, ResponseHandler handler ) - { - if ( verifyOpen( handler, null ) ) - { - writeMessageInEventLoop( message, handler, true ); - } - } - - @Override - public void writeAndFlush( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ) - { - if ( verifyOpen( handler1, handler2 ) ) - { - writeMessagesInEventLoop( message1, handler1, message2, handler2, true ); - } - } - - @Override - public CompletionStage reset() - { - CompletableFuture result = new CompletableFuture<>(); - ResetResponseHandler handler = new ResetResponseHandler( messageDispatcher, result ); - writeResetMessageIfNeeded( handler, true ); - return result; - } - - @Override - public CompletionStage release() - { - if ( status.compareAndSet( Status.OPEN, Status.RELEASED ) ) - { - ChannelReleasingResetResponseHandler handler = new ChannelReleasingResetResponseHandler( channel, - channelPool, messageDispatcher, clock, releaseFuture ); - - writeResetMessageIfNeeded( handler, false ); - metricsListener.afterConnectionReleased( this.serverAddress, this.inUseEvent ); - } - return releaseFuture; - } - - @Override - public void terminateAndRelease( String reason ) - { - if ( status.compareAndSet( Status.OPEN, Status.TERMINATED ) ) - { - setTerminationReason( channel, reason ); - channel.close(); - channelPool.release( channel ); - releaseFuture.complete( null ); - metricsListener.afterConnectionReleased( this.serverAddress, this.inUseEvent ); - } - } - - @Override - public BoltServerAddress serverAddress() - { - return serverAddress; - } - - @Override - public ServerVersion serverVersion() - { - return serverVersion; - } - - @Override - public BoltProtocol protocol() - { - return protocol; - } - - private void writeResetMessageIfNeeded( ResponseHandler resetHandler, boolean isSessionReset ) - { - channel.eventLoop().execute( () -> - { - if ( isSessionReset && !isOpen() ) - { - resetHandler.onSuccess( emptyMap() ); - } - else - { - // auto-read could've been disabled, re-enable it to automatically receive response for RESET - setAutoRead( true ); - - messageDispatcher.enqueue( resetHandler ); - channel.writeAndFlush( ResetMessage.RESET, channel.voidPromise() ); - } - } ); - } - - private void flushInEventLoop() - { - channel.eventLoop().execute( channel::flush ); - } - - private void writeMessageInEventLoop( Message message, ResponseHandler handler, boolean flush ) - { - channel.eventLoop().execute( () -> - { - messageDispatcher.enqueue( handler ); - - if ( flush ) - { - channel.writeAndFlush( message, channel.voidPromise() ); - } - else - { - channel.write( message, channel.voidPromise() ); - } - } ); - } - - private void writeMessagesInEventLoop( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2, boolean flush ) - { - channel.eventLoop().execute( () -> - { - messageDispatcher.enqueue( handler1 ); - messageDispatcher.enqueue( handler2 ); - - channel.write( message1, channel.voidPromise() ); - - if ( flush ) - { - channel.writeAndFlush( message2, channel.voidPromise() ); - } - else - { - channel.write( message2, channel.voidPromise() ); - } - } ); - } - - private void setAutoRead( boolean value ) - { - channel.config().setAutoRead( value ); - } - - private boolean verifyOpen( ResponseHandler handler1, 
ResponseHandler handler2 ) - { - Status connectionStatus = this.status.get(); - switch ( connectionStatus ) - { - case OPEN: - return true; - case RELEASED: - Exception error = new IllegalStateException( "Connection has been released to the pool and can't be used" ); - if ( handler1 != null ) - { - handler1.onFailure( error ); - } - if ( handler2 != null ) - { - handler2.onFailure( error ); - } - return false; - case TERMINATED: - Exception terminatedError = new IllegalStateException( "Connection has been terminated and can't be used" ); - if ( handler1 != null ) - { - handler1.onFailure( terminatedError ); - } - if ( handler2 != null ) - { - handler2.onFailure( terminatedError ); - } - return false; - default: - throw new IllegalStateException( "Unknown status: " + connectionStatus ); - } - } - - private enum Status - { - OPEN, - RELEASED, - TERMINATED - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/EventLoopGroupFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/EventLoopGroupFactory.java deleted file mode 100644 index 2592f10e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/EventLoopGroupFactory.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.util.concurrent.DefaultThreadFactory; -import io.netty.util.concurrent.FastThreadLocalThread; - -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; - -import org.neo4j.driver.Session; -import org.neo4j.driver.async.AsyncSession; - -/** - * Manages creation of Netty {@link EventLoopGroup}s, which are basically {@link Executor}s that perform IO operations. - */ -public final class EventLoopGroupFactory -{ - private static final String THREAD_NAME_PREFIX = "Neo4jDriverIO"; - private static final int THREAD_PRIORITY = Thread.MAX_PRIORITY; - - private EventLoopGroupFactory() - { - } - - /** - * Get class of {@link Channel} for {@link Bootstrap#channel(Class)} method. - * - * @return class of the channel, which should be consistent with {@link EventLoopGroup}s returned by - * {@link #newEventLoopGroup()} and {@link #newEventLoopGroup(int)}. - */ - public static Class channelClass() - { - return NioSocketChannel.class; - } - - /** - * Create new {@link EventLoopGroup} with specified thread count. Returned group should by given to - * {@link Bootstrap#group(EventLoopGroup)}. - * - * @param threadCount amount of IO threads for the new group. - * @return new group consistent with channel class returned by {@link #channelClass()}. 
- */ - public static EventLoopGroup newEventLoopGroup( int threadCount ) - { - return new DriverEventLoopGroup( threadCount ); - } - - /** - * Create new {@link EventLoopGroup} with default thread count. Returned group should by given to - * {@link Bootstrap#group(EventLoopGroup)}. - * - * @return new group consistent with channel class returned by {@link #channelClass()}. - */ - public static EventLoopGroup newEventLoopGroup() - { - return new DriverEventLoopGroup(); - } - - /** - * Assert that current thread is not an event loop used for async IO operations. This check is needed because - * blocking API methods like {@link Session#run(String)} are implemented on top of corresponding async API methods - * like {@link AsyncSession#runAsync(String)} using basically {@link Future#get()} calls. Deadlocks might happen when IO - * thread executes blocking API call and has to wait for itself to read from the network. - * - * @throws IllegalStateException when current thread is an event loop IO thread. - */ - public static void assertNotInEventLoopThread() throws IllegalStateException - { - if ( isEventLoopThread( Thread.currentThread() ) ) - { - throw new IllegalStateException( - "Blocking operation can't be executed in IO thread because it might result in a deadlock. " + - "Please do not use blocking API when chaining futures returned by async API methods." ); - } - } - - /** - * Check if given thread is an event loop IO thread. - * - * @param thread the thread to check. - * @return {@code true} when given thread belongs to the event loop, {@code false} otherwise. - */ - public static boolean isEventLoopThread( Thread thread ) - { - return thread instanceof DriverThread; - } - - /** - * Same as {@link NioEventLoopGroup} but uses a different {@link ThreadFactory} that produces threads of - * {@link DriverThread} class. Such threads can be recognized by {@link #assertNotInEventLoopThread()}. - */ - private static class DriverEventLoopGroup extends NioEventLoopGroup - { - DriverEventLoopGroup() - { - } - - DriverEventLoopGroup( int nThreads ) - { - super( nThreads ); - } - - @Override - protected ThreadFactory newDefaultThreadFactory() - { - return new DriverThreadFactory(); - } - } - - /** - * Same as {@link DefaultThreadFactory} created by {@link NioEventLoopGroup} by default, except produces threads of - * {@link DriverThread} class. Such threads can be recognized by {@link #assertNotInEventLoopThread()}. - */ - private static class DriverThreadFactory extends DefaultThreadFactory - { - DriverThreadFactory() - { - super( THREAD_NAME_PREFIX, THREAD_PRIORITY ); - } - - @Override - protected Thread newThread( Runnable r, String name ) - { - return new DriverThread( threadGroup, r, name ); - } - } - - /** - * Same as default thread created by {@link DefaultThreadFactory} except this dedicated class can be easily - * recognized by {@link #assertNotInEventLoopThread()}. 
- */ - private static class DriverThread extends FastThreadLocalThread - { - DriverThread( ThreadGroup group, Runnable target, String name ) - { - super( group, target, name ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/HandshakeCompletedListener.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/HandshakeCompletedListener.java deleted file mode 100644 index a66c603b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/HandshakeCompletedListener.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelPromise; - -import java.util.Map; - -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.Value; - -import static java.util.Objects.requireNonNull; - -public class HandshakeCompletedListener implements ChannelFutureListener -{ - private final String userAgent; - private final Map authToken; - private final ChannelPromise connectionInitializedPromise; - - public HandshakeCompletedListener( String userAgent, Map authToken, - ChannelPromise connectionInitializedPromise ) - { - this.userAgent = requireNonNull( userAgent ); - this.authToken = requireNonNull( authToken ); - this.connectionInitializedPromise = requireNonNull( connectionInitializedPromise ); - } - - @Override - public void operationComplete( ChannelFuture future ) - { - if ( future.isSuccess() ) - { - BoltProtocol protocol = BoltProtocol.forChannel( future.channel() ); - protocol.initializeChannel( userAgent, authToken, connectionInitializedPromise ); - } - else - { - connectionInitializedPromise.setFailure( future.cause() ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/HandshakeHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/HandshakeHandler.java deleted file mode 100644 index cd1af387..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/HandshakeHandler.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
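The HandshakeCompletedListener above kicks off channel initialization once the Bolt handshake succeeds, and the HandshakeHandler deleted next reads the server's 4-byte protocol-version reply. For context, the client side of a Bolt handshake first writes a 4-byte magic preamble followed by four proposed versions; a rough sketch of composing those bytes with a ByteBuf (the proposed version numbers here are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public final class HandshakeBytesSketch
{
    public static ByteBuf handshakeRequest()
    {
        ByteBuf buf = Unpooled.buffer( 20 );
        buf.writeInt( 0x6060B017 ); // Bolt magic preamble
        buf.writeInt( 3 );          // proposed protocol versions, most preferred first
        buf.writeInt( 2 );
        buf.writeInt( 1 );
        buf.writeInt( 0 );          // zero means "no further proposals"
        return buf;
    }
}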
- */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.DecoderException; -import io.netty.handler.codec.ReplayingDecoder; - -import java.util.List; -import javax.net.ssl.SSLHandshakeException; - -import org.neo4j.driver.internal.logging.ChannelActivityLogger; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.util.ErrorUtil; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.exceptions.SecurityException; -import org.neo4j.driver.exceptions.ServiceUnavailableException; - -import static org.neo4j.driver.internal.async.connection.BoltProtocolUtil.HTTP; -import static org.neo4j.driver.internal.async.connection.BoltProtocolUtil.NO_PROTOCOL_VERSION; - -public class HandshakeHandler extends ReplayingDecoder -{ - private final ChannelPipelineBuilder pipelineBuilder; - private final ChannelPromise handshakeCompletedPromise; - private final Logging logging; - - private boolean failed; - private Logger log; - - public HandshakeHandler( ChannelPipelineBuilder pipelineBuilder, ChannelPromise handshakeCompletedPromise, - Logging logging ) - { - this.pipelineBuilder = pipelineBuilder; - this.handshakeCompletedPromise = handshakeCompletedPromise; - this.logging = logging; - } - - @Override - public void handlerAdded( ChannelHandlerContext ctx ) - { - log = new ChannelActivityLogger( ctx.channel(), logging, getClass() ); - } - - @Override - protected void handlerRemoved0( ChannelHandlerContext ctx ) - { - failed = false; - log = null; - } - - @Override - public void channelInactive( ChannelHandlerContext ctx ) - { - log.debug( "Channel is inactive" ); - - if ( !failed ) - { - // channel became inactive while doing bolt handshake, not because of some previous error - ServiceUnavailableException error = ErrorUtil.newConnectionTerminatedError(); - fail( ctx, error ); - } - } - - @Override - public void exceptionCaught( ChannelHandlerContext ctx, Throwable error ) - { - if ( failed ) - { - log.warn( "Another fatal error occurred in the pipeline", error ); - } - else - { - failed = true; - Throwable cause = transformError( error ); - fail( ctx, cause ); - } - } - - @Override - protected void decode( ChannelHandlerContext ctx, ByteBuf in, List out ) - { - int serverSuggestedVersion = in.readInt(); - log.debug( "S: [Bolt Handshake] %d", serverSuggestedVersion ); - - // this is a one-time handler, remove it when protocol version has been read - ctx.pipeline().remove( this ); - - BoltProtocol protocol = protocolForVersion( serverSuggestedVersion ); - if ( protocol != null ) - { - protocolSelected( serverSuggestedVersion, protocol.createMessageFormat(), ctx ); - } - else - { - handleUnknownSuggestedProtocolVersion( serverSuggestedVersion, ctx ); - } - } - - private BoltProtocol protocolForVersion( int version ) - { - try - { - return BoltProtocol.forVersion( version ); - } - catch ( ClientException e ) - { - return null; - } - } - - private void protocolSelected( int version, MessageFormat messageFormat, ChannelHandlerContext ctx ) - { - ChannelAttributes.setProtocolVersion( ctx.channel(), version ); - pipelineBuilder.build( messageFormat, ctx.pipeline(), logging ); - handshakeCompletedPromise.setSuccess(); - } - - private void handleUnknownSuggestedProtocolVersion( int 
version, ChannelHandlerContext ctx ) - { - switch ( version ) - { - case NO_PROTOCOL_VERSION: - fail( ctx, protocolNoSupportedByServerError() ); - break; - case HTTP: - fail( ctx, httpEndpointError() ); - break; - default: - fail( ctx, protocolNoSupportedByDriverError( version ) ); - break; - } - } - - private void fail( ChannelHandlerContext ctx, Throwable error ) - { - ctx.close().addListener( future -> handshakeCompletedPromise.tryFailure( error ) ); - } - - private static Throwable protocolNoSupportedByServerError() - { - return new ClientException( "The server does not support any of the protocol versions supported by " + - "this driver. Ensure that you are using driver and server versions that " + - "are compatible with one another." ); - } - - private static Throwable httpEndpointError() - { - return new ClientException( - "Server responded HTTP. Make sure you are not trying to connect to the http endpoint " + - "(HTTP defaults to port 7474 whereas BOLT defaults to port 7687)" ); - } - - private static Throwable protocolNoSupportedByDriverError( int suggestedProtocolVersion ) - { - return new ClientException( - "Protocol error, server suggested unexpected protocol version: " + suggestedProtocolVersion ); - } - - private static Throwable transformError( Throwable error ) - { - if ( error instanceof DecoderException && error.getCause() != null ) - { - // unwrap the DecoderException if it has a cause - error = error.getCause(); - } - - if ( error instanceof ServiceUnavailableException ) - { - return error; - } - else if ( error instanceof SSLHandshakeException ) - { - return new SecurityException( "Failed to establish secured connection with the server", error ); - } - else - { - return new ServiceUnavailableException( "Failed to establish connection with the server", error ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/NettyChannelInitializer.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/NettyChannelInitializer.java deleted file mode 100644 index 74ec34be..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/NettyChannelInitializer.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.connection; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelInitializer; -import io.netty.handler.ssl.SslHandler; - -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.inbound.InboundMessageDispatcher; -import org.neo4j.driver.internal.security.SecurityPlan; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.Logging; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setCreationTimestamp; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setMessageDispatcher; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setServerAddress; - -public class NettyChannelInitializer extends ChannelInitializer -{ - private final BoltServerAddress address; - private final SecurityPlan securityPlan; - private final int connectTimeoutMillis; - private final Clock clock; - private final Logging logging; - - public NettyChannelInitializer( BoltServerAddress address, SecurityPlan securityPlan, int connectTimeoutMillis, - Clock clock, Logging logging ) - { - this.address = address; - this.securityPlan = securityPlan; - this.connectTimeoutMillis = connectTimeoutMillis; - this.clock = clock; - this.logging = logging; - } - - @Override - protected void initChannel( Channel channel ) - { - if ( securityPlan.requiresEncryption() ) - { - SslHandler sslHandler = createSslHandler(); - channel.pipeline().addFirst( sslHandler ); - } - - updateChannelAttributes( channel ); - } - - private SslHandler createSslHandler() - { - SSLEngine sslEngine = createSslEngine(); - SslHandler sslHandler = new SslHandler( sslEngine ); - sslHandler.setHandshakeTimeoutMillis( connectTimeoutMillis ); - return sslHandler; - } - - private SSLEngine createSslEngine() - { - SSLContext sslContext = securityPlan.sslContext(); - SSLEngine sslEngine = sslContext.createSSLEngine( address.originalHost(), address.port() ); - sslEngine.setUseClientMode( true ); - if ( securityPlan.requiresHostnameVerification() ) - { - SSLParameters sslParameters = sslEngine.getSSLParameters(); - sslParameters.setEndpointIdentificationAlgorithm( "HTTPS" ); - sslEngine.setSSLParameters( sslParameters ); - } - return sslEngine; - } - - private void updateChannelAttributes( Channel channel ) - { - setServerAddress( channel, address ); - setCreationTimestamp( channel, clock.millis() ); - setMessageDispatcher( channel, new InboundMessageDispatcher( channel, logging ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/RoutingConnection.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/RoutingConnection.java deleted file mode 100644 index 16528529..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/connection/RoutingConnection.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
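The removed NettyChannelInitializer enables hostname verification by setting the endpoint identification algorithm on the SSLEngine it builds from the SecurityPlan. A minimal sketch of that configuration, assuming the JVM default SSLContext instead of the driver's SecurityPlan:

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;
import java.security.NoSuchAlgorithmException;

public final class TlsEngineSketch
{
    public static SSLEngine clientEngine( String host, int port ) throws NoSuchAlgorithmException
    {
        SSLEngine engine = SSLContext.getDefault().createSSLEngine( host, port );
        engine.setUseClientMode( true );

        // turn on hostname verification: the engine checks the server certificate against "host"
        SSLParameters parameters = engine.getSSLParameters();
        parameters.setEndpointIdentificationAlgorithm( "HTTPS" );
        engine.setSSLParameters( parameters );
        return engine;
    }
}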
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.connection; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.RoutingErrorHandler; -import org.neo4j.driver.internal.handlers.RoutingResponseHandler; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.ServerVersion; -import org.neo4j.driver.AccessMode; - -public class RoutingConnection implements Connection -{ - private final Connection delegate; - private final AccessMode accessMode; - private final RoutingErrorHandler errorHandler; - - public RoutingConnection( Connection delegate, AccessMode accessMode, RoutingErrorHandler errorHandler ) - { - this.delegate = delegate; - this.accessMode = accessMode; - this.errorHandler = errorHandler; - } - - @Override - public void enableAutoRead() - { - delegate.enableAutoRead(); - } - - @Override - public void disableAutoRead() - { - delegate.disableAutoRead(); - } - - @Override - public void write( Message message, ResponseHandler handler ) - { - delegate.write( message, newRoutingResponseHandler( handler ) ); - } - - @Override - public void write( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ) - { - delegate.write( message1, newRoutingResponseHandler( handler1 ), message2, newRoutingResponseHandler( handler2 ) ); - } - - @Override - public void writeAndFlush( Message message, ResponseHandler handler ) - { - delegate.writeAndFlush( message, newRoutingResponseHandler( handler ) ); - } - - @Override - public void writeAndFlush( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ) - { - delegate.writeAndFlush( message1, newRoutingResponseHandler( handler1 ), message2, newRoutingResponseHandler( handler2 ) ); - } - - @Override - public CompletionStage reset() - { - return delegate.reset(); - } - - @Override - public boolean isOpen() - { - return delegate.isOpen(); - } - - @Override - public CompletionStage release() - { - return delegate.release(); - } - - @Override - public void terminateAndRelease( String reason ) - { - delegate.terminateAndRelease( reason ); - } - - @Override - public BoltServerAddress serverAddress() - { - return delegate.serverAddress(); - } - - @Override - public ServerVersion serverVersion() - { - return delegate.serverVersion(); - } - - @Override - public BoltProtocol protocol() - { - return delegate.protocol(); - } - - @Override - public void flush() - { - delegate.flush(); - } - - private RoutingResponseHandler newRoutingResponseHandler( ResponseHandler handler ) - { - return new RoutingResponseHandler( handler, serverAddress(), accessMode, errorHandler ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ChannelErrorHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ChannelErrorHandler.java deleted file mode 100644 index c80345df..00000000 
--- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ChannelErrorHandler.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.inbound; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.handler.codec.CodecException; - -import java.io.IOException; - -import org.neo4j.driver.internal.logging.ChannelActivityLogger; -import org.neo4j.driver.internal.util.ErrorUtil; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ServiceUnavailableException; - -import static java.util.Objects.requireNonNull; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.messageDispatcher; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.terminationReason; - -public class ChannelErrorHandler extends ChannelInboundHandlerAdapter -{ - private final Logging logging; - - private InboundMessageDispatcher messageDispatcher; - private Logger log; - private boolean failed; - - public ChannelErrorHandler( Logging logging ) - { - this.logging = logging; - } - - @Override - public void handlerAdded( ChannelHandlerContext ctx ) - { - messageDispatcher = requireNonNull( messageDispatcher( ctx.channel() ) ); - log = new ChannelActivityLogger( ctx.channel(), logging, getClass() ); - } - - @Override - public void handlerRemoved( ChannelHandlerContext ctx ) - { - messageDispatcher = null; - log = null; - failed = false; - } - - @Override - public void channelInactive( ChannelHandlerContext ctx ) - { - log.debug( "Channel is inactive" ); - - if ( !failed ) - { - // channel became inactive not because of a fatal exception that came from exceptionCaught - // it is most likely inactive because actual network connection broke or was explicitly closed by the driver - - String terminationReason = terminationReason( ctx.channel() ); - ServiceUnavailableException error = ErrorUtil.newConnectionTerminatedError( terminationReason ); - fail( ctx, error ); - } - } - - @Override - public void exceptionCaught( ChannelHandlerContext ctx, Throwable error ) - { - if ( failed ) - { - log.warn( "Another fatal error occurred in the pipeline", error ); - } - else - { - failed = true; - log.error( "Fatal error occurred in the pipeline", error ); - fail( ctx, error ); - } - } - - private void fail( ChannelHandlerContext ctx, Throwable error ) - { - Throwable cause = transformError( error ); - messageDispatcher.handleFatalError( cause ); - log.debug( "Closing channel because of a failure '%s'", error ); - ctx.close(); - } - - private static Throwable transformError( Throwable error ) - { - if ( error instanceof CodecException && error.getCause() != null ) - { - // unwrap the CodecException if it has a cause - error = error.getCause(); - } - - if ( error instanceof IOException 
) - { - return new ServiceUnavailableException( "Connection to the database failed", error ); - } - else - { - return error; - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ChunkDecoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ChunkDecoder.java deleted file mode 100644 index c1d51f67..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ChunkDecoder.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.inbound; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufUtil; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; - -import org.neo4j.driver.internal.logging.ChannelActivityLogger; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -public class ChunkDecoder extends LengthFieldBasedFrameDecoder -{ - private static final int MAX_FRAME_BODY_LENGTH = 0xFFFF; - private static final int LENGTH_FIELD_OFFSET = 0; - private static final int LENGTH_FIELD_LENGTH = 2; - private static final int LENGTH_ADJUSTMENT = 0; - private static final int INITIAL_BYTES_TO_STRIP = LENGTH_FIELD_LENGTH; - private static final int MAX_FRAME_LENGTH = LENGTH_FIELD_LENGTH + MAX_FRAME_BODY_LENGTH; - - private final Logging logging; - private Logger log; - - public ChunkDecoder( Logging logging ) - { - super( MAX_FRAME_LENGTH, LENGTH_FIELD_OFFSET, LENGTH_FIELD_LENGTH, LENGTH_ADJUSTMENT, INITIAL_BYTES_TO_STRIP ); - this.logging = logging; - } - - @Override - public void handlerAdded( ChannelHandlerContext ctx ) - { - log = new ChannelActivityLogger( ctx.channel(), logging, getClass() ); - } - - @Override - protected void handlerRemoved0( ChannelHandlerContext ctx ) - { - log = null; - } - - @Override - protected ByteBuf extractFrame( ChannelHandlerContext ctx, ByteBuf buffer, int index, int length ) - { - if ( log.isTraceEnabled() ) - { - int originalReaderIndex = buffer.readerIndex(); - int readerIndexWithChunkHeader = originalReaderIndex - INITIAL_BYTES_TO_STRIP; - int lengthWithChunkHeader = INITIAL_BYTES_TO_STRIP + length; - String hexDump = ByteBufUtil.hexDump( buffer, readerIndexWithChunkHeader, lengthWithChunkHeader ); - log.trace( "S: %s", hexDump ); - } - return super.extractFrame( ctx, buffer, index, length ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ConnectTimeoutHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ConnectTimeoutHandler.java deleted file mode 100644 index 48aeff40..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/ConnectTimeoutHandler.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
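The removed ChunkDecoder is a LengthFieldBasedFrameDecoder configured for Bolt's 2-byte big-endian chunk headers. A sketch of an equivalent configuration (the constants mirror the removed ones; the class name is illustrative):

import io.netty.handler.codec.LengthFieldBasedFrameDecoder;

public class ChunkFrameDecoderSketch extends LengthFieldBasedFrameDecoder
{
    public ChunkFrameDecoderSketch()
    {
        // frames look like [2-byte length][length bytes of payload];
        // the header is stripped so downstream handlers only see the payload
        super( 2 + 0xFFFF, // max frame length: header plus max body
               0,          // length field starts at offset 0
               2,          // length field is 2 bytes
               0,          // no adjustment: the length counts only the body
               2 );        // strip the 2 header bytes from the output
    }
}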
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.inbound; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.timeout.ReadTimeoutHandler; - -import java.util.concurrent.TimeUnit; - -import org.neo4j.driver.exceptions.ServiceUnavailableException; - -/** - * Handler needed to limit amount of time connection performs TLS and Bolt handshakes. - * It should only be used when connection is established and removed from the pipeline afterwards. - * Otherwise it will make long running queries fail. - */ -public class ConnectTimeoutHandler extends ReadTimeoutHandler -{ - private final long timeoutMillis; - private boolean triggered; - - public ConnectTimeoutHandler( long timeoutMillis ) - { - super( timeoutMillis, TimeUnit.MILLISECONDS ); - this.timeoutMillis = timeoutMillis; - } - - @Override - protected void readTimedOut( ChannelHandlerContext ctx ) - { - if ( !triggered ) - { - triggered = true; - ctx.fireExceptionCaught( unableToConnectError() ); - } - } - - private ServiceUnavailableException unableToConnectError() - { - return new ServiceUnavailableException( "Unable to establish connection in " + timeoutMillis + "ms" ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/InboundMessageDispatcher.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/InboundMessageDispatcher.java deleted file mode 100644 index e9eb3689..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/InboundMessageDispatcher.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
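As its javadoc notes, the removed ConnectTimeoutHandler only bounds the TLS and Bolt handshakes and must be taken out of the pipeline once the connection is established, otherwise it would fail long-running queries. A sketch of that install-then-remove pattern around a plain ReadTimeoutHandler (handler name string is illustrative):

import io.netty.channel.Channel;
import io.netty.handler.timeout.ReadTimeoutHandler;

import java.util.concurrent.TimeUnit;

public final class ConnectTimeoutSketch
{
    private static final String HANDLER_NAME = "connectTimeout";

    public static void installTimeout( Channel channel, long timeoutMillis )
    {
        // active only while the connection is being established
        channel.pipeline().addFirst( HANDLER_NAME, new ReadTimeoutHandler( timeoutMillis, TimeUnit.MILLISECONDS ) );
    }

    public static void removeTimeoutWhenConnected( Channel channel )
    {
        // once the handshake has completed the timeout no longer applies
        channel.pipeline().remove( HANDLER_NAME );
    }
}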
- */ -package org.neo4j.driver.internal.async.inbound; - -import io.netty.channel.Channel; - -import java.util.Arrays; -import java.util.LinkedList; -import java.util.Map; -import java.util.Queue; - -import org.neo4j.driver.internal.handlers.ResetResponseHandler; -import org.neo4j.driver.internal.logging.ChannelActivityLogger; -import org.neo4j.driver.internal.messaging.ResponseMessageHandler; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.ErrorUtil; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.Value; -import org.neo4j.driver.exceptions.ClientException; - -import static java.util.Objects.requireNonNull; -import static org.neo4j.driver.internal.messaging.request.ResetMessage.RESET; - -public class InboundMessageDispatcher implements ResponseMessageHandler -{ - private final Channel channel; - private final Queue handlers = new LinkedList<>(); - private final Logger log; - - private Throwable currentError; - private boolean fatalErrorOccurred; - - private ResponseHandler autoReadManagingHandler; - - public InboundMessageDispatcher( Channel channel, Logging logging ) - { - this.channel = requireNonNull( channel ); - this.log = new ChannelActivityLogger( channel, logging, getClass() ); - } - - public void enqueue( ResponseHandler handler ) - { - if ( fatalErrorOccurred ) - { - handler.onFailure( currentError ); - } - else - { - handlers.add( handler ); - updateAutoReadManagingHandlerIfNeeded( handler ); - } - } - - public int queuedHandlersCount() - { - return handlers.size(); - } - - @Override - public void handleSuccessMessage( Map meta ) - { - log.debug( "S: SUCCESS %s", meta ); - ResponseHandler handler = removeHandler(); - handler.onSuccess( meta ); - } - - @Override - public void handleRecordMessage( Value[] fields ) - { - if ( log.isDebugEnabled() ) - { - log.debug( "S: RECORD %s", Arrays.toString( fields ) ); - } - ResponseHandler handler = handlers.peek(); - if ( handler == null ) - { - throw new IllegalStateException( "No handler exists to handle RECORD message with fields: " + Arrays.toString( fields ) ); - } - handler.onRecord( fields ); - } - - @Override - public void handleFailureMessage( String code, String message ) - { - log.debug( "S: FAILURE %s \"%s\"", code, message ); - - currentError = ErrorUtil.newNeo4jError( code, message ); - - if ( ErrorUtil.isFatal( currentError ) ) - { - // we should not continue using channel after a fatal error - // fire error event back to the pipeline and avoid sending RESET - channel.pipeline().fireExceptionCaught( currentError ); - return; - } - - // write a RESET to "acknowledge" the failure - enqueue( new ResetResponseHandler( this ) ); - channel.writeAndFlush( RESET, channel.voidPromise() ); - - ResponseHandler handler = removeHandler(); - handler.onFailure( currentError ); - } - - @Override - public void handleIgnoredMessage() - { - log.debug( "S: IGNORED" ); - - ResponseHandler handler = removeHandler(); - - Throwable error; - if ( currentError != null ) - { - error = currentError; - } - else - { - log.warn( "Received IGNORED message for handler %s but error is missing and RESET is not in progress. 
" + - "Current handlers %s", handler, handlers ); - - error = new ClientException( "Database ignored the request" ); - } - handler.onFailure( error ); - } - - public void handleFatalError( Throwable error ) - { - currentError = error; - fatalErrorOccurred = true; - - while ( !handlers.isEmpty() ) - { - ResponseHandler handler = removeHandler(); - handler.onFailure( currentError ); - } - } - - public void clearCurrentError() - { - currentError = null; - } - - public Throwable currentError() - { - return currentError; - } - - public boolean fatalErrorOccurred() - { - return fatalErrorOccurred; - } - - /** - * Visible for testing - */ - ResponseHandler autoReadManagingHandler() - { - return autoReadManagingHandler; - } - - private ResponseHandler removeHandler() - { - ResponseHandler handler = handlers.remove(); - if ( handler == autoReadManagingHandler ) - { - // the auto-read managing handler is being removed - // make sure this dispatcher does not hold on to a removed handler - updateAutoReadManagingHandler( null ); - } - return handler; - } - - private void updateAutoReadManagingHandlerIfNeeded( ResponseHandler handler ) - { - if ( handler.canManageAutoRead() ) - { - updateAutoReadManagingHandler( handler ); - } - } - - private void updateAutoReadManagingHandler( ResponseHandler newHandler ) - { - if ( autoReadManagingHandler != null ) - { - // there already exists a handler that manages channel's auto-read - // make it stop because new managing handler is being added and there should only be a single such handler - autoReadManagingHandler.disableAutoReadManagement(); - // restore the default value of auto-read - channel.config().setAutoRead( true ); - } - autoReadManagingHandler = newHandler; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/MessageDecoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/MessageDecoder.java deleted file mode 100644 index 899a273f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/inbound/MessageDecoder.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.inbound; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.ByteToMessageDecoder; - -import java.util.List; - -public class MessageDecoder extends ByteToMessageDecoder -{ - private static final Cumulator DEFAULT_CUMULATOR = determineDefaultCumulator(); - - private boolean readMessageBoundary; - - public MessageDecoder() - { - setCumulator( DEFAULT_CUMULATOR ); - } - - @Override - public void channelRead( ChannelHandlerContext ctx, Object msg ) throws Exception - { - if ( msg instanceof ByteBuf ) - { - // on every read check if input buffer is empty or not - // if it is empty then it's a message boundary and full message is in the buffer - readMessageBoundary = ((ByteBuf) msg).readableBytes() == 0; - } - super.channelRead( ctx, msg ); - } - - @Override - protected void decode( ChannelHandlerContext ctx, ByteBuf in, List out ) - { - if ( readMessageBoundary ) - { - // now we have a complete message in the input buffer - - // increment ref count of the buffer and create it's duplicate that shares the content - // duplicate will be the output of this decoded and input for the next one - ByteBuf messageBuf = in.retainedDuplicate(); - - // signal that whole message was read by making input buffer seem like it was fully read/consumed - in.readerIndex( in.readableBytes() ); - - // pass the full message to the next handler in the pipeline - out.add( messageBuf ); - - readMessageBoundary = false; - } - } - - private static Cumulator determineDefaultCumulator() - { - String value = System.getProperty( "messageDecoderCumulator", "" ); - if ( "merge".equals( value ) ) - { - return MERGE_CUMULATOR; - } - return COMPOSITE_CUMULATOR; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/outbound/ChunkAwareByteBufOutput.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/outbound/ChunkAwareByteBufOutput.java deleted file mode 100644 index d65d944c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/outbound/ChunkAwareByteBufOutput.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.outbound; - -import io.netty.buffer.ByteBuf; - -import org.neo4j.driver.internal.async.connection.BoltProtocolUtil; -import org.neo4j.driver.internal.packstream.PackOutput; - -import static java.util.Objects.requireNonNull; -import static org.neo4j.driver.internal.async.connection.BoltProtocolUtil.CHUNK_HEADER_SIZE_BYTES; -import static org.neo4j.driver.internal.async.connection.BoltProtocolUtil.DEFAULT_MAX_OUTBOUND_CHUNK_SIZE_BYTES; - -public class ChunkAwareByteBufOutput implements PackOutput -{ - private final int maxChunkSize; - - private ByteBuf buf; - private int currentChunkStartIndex; - private int currentChunkSize; - - public ChunkAwareByteBufOutput() - { - this( DEFAULT_MAX_OUTBOUND_CHUNK_SIZE_BYTES ); - } - - ChunkAwareByteBufOutput( int maxChunkSize ) - { - this.maxChunkSize = verifyMaxChunkSize( maxChunkSize ); - } - - public void start( ByteBuf newBuf ) - { - assertNotStarted(); - buf = requireNonNull( newBuf ); - startNewChunk( 0 ); - } - - public void stop() - { - writeChunkSizeHeader(); - buf = null; - currentChunkStartIndex = 0; - currentChunkSize = 0; - } - - @Override - public PackOutput writeByte( byte value ) - { - ensureCanFitInCurrentChunk( 1 ); - buf.writeByte( value ); - currentChunkSize += 1; - return this; - } - - @Override - public PackOutput writeBytes( byte[] data ) - { - int offset = 0; - int length = data.length; - while ( offset < length ) - { - // Ensure there is an open chunk, and that it has at least one byte of space left - ensureCanFitInCurrentChunk( 1 ); - - // Write as much as we can into the current chunk - int amountToWrite = Math.min( availableBytesInCurrentChunk(), length - offset ); - - buf.writeBytes( data, offset, amountToWrite ); - currentChunkSize += amountToWrite; - offset += amountToWrite; - } - return this; - } - - @Override - public PackOutput writeShort( short value ) - { - ensureCanFitInCurrentChunk( 2 ); - buf.writeShort( value ); - currentChunkSize += 2; - return this; - } - - @Override - public PackOutput writeInt( int value ) - { - ensureCanFitInCurrentChunk( 4 ); - buf.writeInt( value ); - currentChunkSize += 4; - return this; - } - - @Override - public PackOutput writeLong( long value ) - { - ensureCanFitInCurrentChunk( 8 ); - buf.writeLong( value ); - currentChunkSize += 8; - return this; - } - - @Override - public PackOutput writeDouble( double value ) - { - ensureCanFitInCurrentChunk( 8 ); - buf.writeDouble( value ); - currentChunkSize += 8; - return this; - } - - private void ensureCanFitInCurrentChunk( int numberOfBytes ) - { - int targetChunkSize = currentChunkSize + numberOfBytes; - if ( targetChunkSize > maxChunkSize ) - { - writeChunkSizeHeader(); - startNewChunk( buf.writerIndex() ); - } - } - - private void startNewChunk( int index ) - { - currentChunkStartIndex = index; - BoltProtocolUtil.writeEmptyChunkHeader( buf ); - currentChunkSize = CHUNK_HEADER_SIZE_BYTES; - } - - private void writeChunkSizeHeader() - { - // go to the beginning of the chunk and write the size header - int chunkBodySize = currentChunkSize - CHUNK_HEADER_SIZE_BYTES; - BoltProtocolUtil.writeChunkHeader( buf, currentChunkStartIndex, chunkBodySize ); - } - - private int availableBytesInCurrentChunk() - { - return maxChunkSize - currentChunkSize; - } - - private void assertNotStarted() - { - if ( buf != null ) - { - throw new IllegalStateException( "Already started" ); - } - } - - private static int verifyMaxChunkSize( int maxChunkSize ) - { - if ( maxChunkSize <= 0 ) - { - throw new 
IllegalArgumentException( "Max chunk size should be > 0, given: " + maxChunkSize ); - } - return maxChunkSize; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/outbound/OutboundMessageHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/outbound/OutboundMessageHandler.java deleted file mode 100644 index 97c189fd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/outbound/OutboundMessageHandler.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.outbound; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.EncoderException; -import io.netty.handler.codec.MessageToMessageEncoder; - -import java.util.List; - -import org.neo4j.driver.internal.async.connection.BoltProtocolUtil; -import org.neo4j.driver.internal.logging.ChannelActivityLogger; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -import static io.netty.buffer.ByteBufUtil.hexDump; - -public class OutboundMessageHandler extends MessageToMessageEncoder -{ - public static final String NAME = OutboundMessageHandler.class.getSimpleName(); - - private final MessageFormat messageFormat; - private final ChunkAwareByteBufOutput output; - private final MessageFormat.Writer writer; - private final Logging logging; - - private Logger log; - - public OutboundMessageHandler( MessageFormat messageFormat, Logging logging ) - { - this( messageFormat, true, logging ); - } - - private OutboundMessageHandler( MessageFormat messageFormat, boolean byteArraySupportEnabled, Logging logging ) - { - this.messageFormat = messageFormat; - this.output = new ChunkAwareByteBufOutput(); - this.writer = messageFormat.newWriter( output, byteArraySupportEnabled ); - this.logging = logging; - } - - @Override - public void handlerAdded( ChannelHandlerContext ctx ) - { - log = new ChannelActivityLogger( ctx.channel(), logging, getClass() ); - } - - @Override - public void handlerRemoved( ChannelHandlerContext ctx ) - { - log = null; - } - - @Override - protected void encode( ChannelHandlerContext ctx, Message msg, List out ) - { - log.debug( "C: %s", msg ); - - ByteBuf messageBuf = ctx.alloc().ioBuffer(); - output.start( messageBuf ); - try - { - writer.write( msg ); - output.stop(); - } - catch ( Throwable error ) - { - output.stop(); - // release buffer because it will not get added to the out list and no other handler is going to handle it - messageBuf.release(); - throw new EncoderException( "Failed to write outbound message: " + msg, error ); - } - - if ( log.isTraceEnabled() ) - { - log.trace( "C: %s", hexDump( messageBuf ) ); - } - - BoltProtocolUtil.writeMessageBoundary( messageBuf ); - out.add( messageBuf ); - } 
- - public OutboundMessageHandler withoutByteArraySupport() - { - return new OutboundMessageHandler( messageFormat, false, logging ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/ConnectionPoolImpl.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/ConnectionPoolImpl.java deleted file mode 100644 index 8a1dd9be..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/ConnectionPoolImpl.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.pool; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; -import io.netty.util.concurrent.Future; - -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.connection.ChannelConnector; -import org.neo4j.driver.internal.async.connection.DirectConnection; -import org.neo4j.driver.internal.metrics.ListenerEvent; -import org.neo4j.driver.internal.metrics.MetricsListener; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ClientException; - -public class ConnectionPoolImpl implements ConnectionPool -{ - private final ChannelConnector connector; - private final Bootstrap bootstrap; - private final NettyChannelTracker nettyChannelTracker; - private final NettyChannelHealthChecker channelHealthChecker; - private final PoolSettings settings; - private final Clock clock; - private final Logger log; - private final MetricsListener metricsListener; - - private final ConcurrentMap pools = new ConcurrentHashMap<>(); - private final AtomicBoolean closed = new AtomicBoolean(); - - public ConnectionPoolImpl( ChannelConnector connector, Bootstrap bootstrap, PoolSettings settings, MetricsListener metricsListener, Logging logging, Clock clock ) - { - this( connector, bootstrap, new NettyChannelTracker( metricsListener, bootstrap.config().group().next(), logging ), settings, metricsListener, logging, clock ); - } - - ConnectionPoolImpl( ChannelConnector connector, Bootstrap bootstrap, NettyChannelTracker nettyChannelTracker, - PoolSettings settings, MetricsListener metricsListener, Logging logging, Clock clock ) - { - this.connector = connector; - this.bootstrap = bootstrap; - 
this.nettyChannelTracker = nettyChannelTracker; - this.channelHealthChecker = new NettyChannelHealthChecker( settings, clock, logging ); - this.settings = settings; - this.metricsListener = metricsListener; - this.clock = clock; - this.log = logging.getLog( ConnectionPool.class.getSimpleName() ); - } - - @Override - public CompletionStage acquire( BoltServerAddress address ) - { - log.trace( "Acquiring a connection from pool towards %s", address ); - - assertNotClosed(); - ChannelPool pool = getOrCreatePool( address ); - - ListenerEvent acquireEvent = metricsListener.createListenerEvent(); - metricsListener.beforeAcquiringOrCreating( address, acquireEvent ); - Future connectionFuture = pool.acquire(); - - return Futures.asCompletionStage( connectionFuture ).handle( ( channel, error ) -> - { - try - { - processAcquisitionError( address, error ); - assertNotClosed( address, channel, pool ); - Connection connection = new DirectConnection( channel, pool, clock, metricsListener ); - - metricsListener.afterAcquiredOrCreated( address, acquireEvent ); - return connection; - } - finally - { - metricsListener.afterAcquiringOrCreating( address ); - } - } ); - } - - @Override - public void retainAll( Set addressesToRetain ) - { - for ( BoltServerAddress address : pools.keySet() ) - { - if ( !addressesToRetain.contains( address ) ) - { - int activeChannels = nettyChannelTracker.inUseChannelCount( address ); - if ( activeChannels == 0 ) - { - // address is not present in updated routing table and has no active connections - // it's now safe to terminate corresponding connection pool and forget about it - - ChannelPool pool = pools.remove( address ); - if ( pool != null ) - { - log.info( "Closing connection pool towards %s, it has no active connections " + - "and is not in the routing table", address ); - pool.close(); - } - } - } - } - } - - @Override - public int inUseConnections( BoltServerAddress address ) - { - return nettyChannelTracker.inUseChannelCount( address ); - } - - @Override - public int idleConnections( BoltServerAddress address ) - { - return nettyChannelTracker.idleChannelCount( address ); - } - - @Override - public CompletionStage close() - { - if ( closed.compareAndSet( false, true ) ) - { - try - { - nettyChannelTracker.prepareToCloseChannels(); - for ( Map.Entry entry : pools.entrySet() ) - { - BoltServerAddress address = entry.getKey(); - ChannelPool pool = entry.getValue(); - log.info( "Closing connection pool towards %s", address ); - pool.close(); - } - - pools.clear(); - } - finally - { - eventLoopGroup().shutdownGracefully(); - } - } - return Futures.asCompletionStage( eventLoopGroup().terminationFuture() ) - .thenApply( ignore -> null ); - } - - @Override - public boolean isOpen( BoltServerAddress address ) - { - return pools.containsKey( address ); - } - - private ChannelPool getOrCreatePool( BoltServerAddress address ) - { - ChannelPool pool = pools.get( address ); - if ( pool != null ) - { - return pool; - } - - synchronized ( this ) - { - pool = pools.get( address ); - if ( pool != null ) - { - return pool; - } - - metricsListener.putPoolMetrics( address, this ); - pool = newPool( address ); - pools.put( address, pool ); - } - return pool; - } - - ChannelPool newPool( BoltServerAddress address ) - { - return new NettyChannelPool( address, connector, bootstrap, nettyChannelTracker, channelHealthChecker, - settings.connectionAcquisitionTimeout(), settings.maxConnectionPoolSize() ); - } - - private EventLoopGroup eventLoopGroup() - { - return bootstrap.config().group(); - } 
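The removed ConnectionPoolImpl keeps one ChannelPool per server address, created lazily so that concurrent acquirers always share a single pool for the same address. A sketch of that lookup pattern with placeholder types for the address and pool:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

public class PoolPerAddressSketch<A, P>
{
    private final ConcurrentMap<A, P> pools = new ConcurrentHashMap<>();
    private final Function<A, P> poolFactory;

    public PoolPerAddressSketch( Function<A, P> poolFactory )
    {
        this.poolFactory = poolFactory;
    }

    public P getOrCreate( A address )
    {
        // computeIfAbsent guarantees a single pool per address even under concurrent acquires;
        // the removed implementation used an explicit lock so it could also register pool metrics exactly once
        return pools.computeIfAbsent( address, poolFactory );
    }
}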
- - private void processAcquisitionError( BoltServerAddress serverAddress, Throwable error ) - { - Throwable cause = Futures.completionExceptionCause( error ); - if ( cause != null ) - { - if ( cause instanceof TimeoutException ) - { - // NettyChannelPool returns future failed with TimeoutException if acquire operation takes more than - // configured time, translate this exception to a prettier one and re-throw - metricsListener.afterTimedOutToAcquireOrCreate( serverAddress ); - throw new ClientException( - "Unable to acquire connection from the pool within configured maximum time of " + - settings.connectionAcquisitionTimeout() + "ms" ); - } - else - { - // some unknown error happened during connection acquisition, propagate it - throw new CompletionException( cause ); - } - } - } - - private void assertNotClosed() - { - if ( closed.get() ) - { - throw new IllegalStateException( "Pool closed" ); - } - } - - private void assertNotClosed( BoltServerAddress address, Channel channel, ChannelPool pool ) - { - if ( closed.get() ) - { - pool.release( channel ); - pool.close(); - pools.remove( address ); - assertNotClosed(); - } - } - - @Override - public String toString() - { - return "ConnectionPoolImpl{" + "pools=" + pools + '}'; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelHealthChecker.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelHealthChecker.java deleted file mode 100644 index 12fb6a46..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelHealthChecker.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.pool; - -import io.netty.channel.Channel; -import io.netty.channel.pool.ChannelHealthChecker; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.Promise; - -import org.neo4j.driver.internal.handlers.PingResponseHandler; -import org.neo4j.driver.internal.messaging.request.ResetMessage; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.creationTimestamp; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.lastUsedTimestamp; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.messageDispatcher; - -public class NettyChannelHealthChecker implements ChannelHealthChecker -{ - private final PoolSettings poolSettings; - private final Clock clock; - private final Logger log; - - public NettyChannelHealthChecker( PoolSettings poolSettings, Clock clock, Logging logging ) - { - this.poolSettings = poolSettings; - this.clock = clock; - this.log = logging.getLog( getClass().getSimpleName() ); - } - - @Override - public Future isHealthy( Channel channel ) - { - if ( isTooOld( channel ) ) - { - return channel.eventLoop().newSucceededFuture( Boolean.FALSE ); - } - if ( hasBeenIdleForTooLong( channel ) ) - { - return ping( channel ); - } - return ACTIVE.isHealthy( channel ); - } - - private boolean isTooOld( Channel channel ) - { - if ( poolSettings.maxConnectionLifetimeEnabled() ) - { - long creationTimestampMillis = creationTimestamp( channel ); - long currentTimestampMillis = clock.millis(); - - long ageMillis = currentTimestampMillis - creationTimestampMillis; - long maxAgeMillis = poolSettings.maxConnectionLifetime(); - - boolean tooOld = ageMillis > maxAgeMillis; - if ( tooOld ) - { - log.trace( "Failed acquire channel %s from the pool because it is too old: %s > %s", - channel, ageMillis, maxAgeMillis ); - } - - return tooOld; - } - return false; - } - - private boolean hasBeenIdleForTooLong( Channel channel ) - { - if ( poolSettings.idleTimeBeforeConnectionTestEnabled() ) - { - Long lastUsedTimestamp = lastUsedTimestamp( channel ); - if ( lastUsedTimestamp != null ) - { - long idleTime = clock.millis() - lastUsedTimestamp; - boolean idleTooLong = idleTime > poolSettings.idleTimeBeforeConnectionTest(); - - log.trace( "Channel %s has been idle for %s and needs a ping", channel, idleTime ); - - return idleTooLong; - } - } - return false; - } - - private Future ping( Channel channel ) - { - Promise result = channel.eventLoop().newPromise(); - messageDispatcher( channel ).enqueue( new PingResponseHandler( result, channel, log ) ); - channel.writeAndFlush( ResetMessage.RESET, channel.voidPromise() ); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelPool.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelPool.java deleted file mode 100644 index 8168c09f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelPool.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
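The removed NettyChannelHealthChecker discards channels that exceed the configured maximum lifetime and pings (via RESET) channels that have sat idle longer than the test threshold before handing them out again. A sketch of that age/idle decision with millisecond timestamps and illustrative thresholds:

public final class ChannelAgeCheckSketch
{
    public enum Decision { HEALTHY, PING_FIRST, DISCARD }

    public static Decision check( long createdAtMillis, long lastUsedAtMillis, long nowMillis,
                                  long maxLifetimeMillis, long idleBeforeTestMillis )
    {
        if ( nowMillis - createdAtMillis > maxLifetimeMillis )
        {
            return Decision.DISCARD;    // too old: never hand it out again
        }
        if ( nowMillis - lastUsedAtMillis > idleBeforeTestMillis )
        {
            return Decision.PING_FIRST; // idle for a while: verify it still responds before reuse
        }
        return Decision.HEALTHY;
    }
}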
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.async.pool; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.pool.ChannelHealthChecker; -import io.netty.channel.pool.FixedChannelPool; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.connection.ChannelConnector; -import org.neo4j.driver.internal.metrics.ListenerEvent; - -import static java.util.Objects.requireNonNull; - -public class NettyChannelPool extends FixedChannelPool -{ - /** - * Unlimited amount of parties are allowed to request channels from the pool. - */ - private static final int MAX_PENDING_ACQUIRES = Integer.MAX_VALUE; - /** - * Do not check channels when they are returned to the pool. - */ - private static final boolean RELEASE_HEALTH_CHECK = false; - - private final BoltServerAddress address; - private final ChannelConnector connector; - private final NettyChannelTracker handler; - - public NettyChannelPool( BoltServerAddress address, ChannelConnector connector, Bootstrap bootstrap, NettyChannelTracker handler, - ChannelHealthChecker healthCheck, long acquireTimeoutMillis, int maxConnections ) - { - super( bootstrap, handler, healthCheck, AcquireTimeoutAction.FAIL, acquireTimeoutMillis, maxConnections, - MAX_PENDING_ACQUIRES, RELEASE_HEALTH_CHECK ); - - this.address = requireNonNull( address ); - this.connector = requireNonNull( connector ); - this.handler = requireNonNull( handler ); - } - - @Override - protected ChannelFuture connectChannel( Bootstrap bootstrap ) - { - ListenerEvent creatingEvent = handler.channelCreating( address ); - ChannelFuture channelFuture = connector.connect( address, bootstrap ); - channelFuture.addListener( future -> - { - if ( future.isSuccess() ) - { - // notify pool handler about a successful connection - Channel channel = channelFuture.channel(); - handler.channelCreated( channel, creatingEvent ); - } - else - { - handler.channelFailedToCreate( address ); - } - } ); - return channelFuture; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelTracker.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelTracker.java deleted file mode 100644 index 949b6049..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/NettyChannelTracker.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.async.pool; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.group.ChannelGroup; -import io.netty.channel.group.DefaultChannelGroup; -import io.netty.channel.pool.ChannelPoolHandler; -import io.netty.util.concurrent.EventExecutor; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.metrics.ListenerEvent; -import org.neo4j.driver.internal.metrics.MetricsListener; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.serverAddress; - -public class NettyChannelTracker implements ChannelPoolHandler -{ - private final Map addressToInUseChannelCount = new ConcurrentHashMap<>(); - private final Map addressToIdleChannelCount = new ConcurrentHashMap<>(); - private final Logger log; - private final MetricsListener metricsListener; - private final ChannelFutureListener closeListener = future -> channelClosed( future.channel() ); - private final ChannelGroup allChannels; - - public NettyChannelTracker( MetricsListener metricsListener, EventExecutor eventExecutor, Logging logging ) - { - this( metricsListener, new DefaultChannelGroup( "all-connections", eventExecutor ), logging ); - } - - public NettyChannelTracker( MetricsListener metricsListener, ChannelGroup channels, Logging logging ) - { - this.metricsListener = metricsListener; - this.log = logging.getLog( getClass().getSimpleName() ); - this.allChannels = channels; - } - - @Override - public void channelReleased( Channel channel ) - { - log.debug( "Channel [%s] released back to the pool", channel.id() ); - decrementInUse( channel ); - incrementIdle( channel ); - channel.closeFuture().addListener( closeListener ); - } - - @Override - public void channelAcquired( Channel channel ) - { - log.debug( "Channel [%s] acquired from the pool. Local address: %s, remote address: %s", - channel.id(), channel.localAddress(), channel.remoteAddress() ); - - incrementInUse( channel ); - decrementIdle( channel ); - channel.closeFuture().removeListener( closeListener ); - } - - @Override - public void channelCreated( Channel channel ) - { - throw new IllegalStateException( "Untraceable channel created." ); - } - - public void channelCreated( Channel channel, ListenerEvent creatingEvent ) - { - log.debug( "Channel [%s] created. Local address: %s, remote address: %s", - channel.id(), channel.localAddress(), channel.remoteAddress() ); - - incrementInUse( channel ); - metricsListener.afterCreated( serverAddress( channel ), creatingEvent ); - - allChannels.add( channel ); - } - - public ListenerEvent channelCreating( BoltServerAddress address ) - { - ListenerEvent creatingEvent = metricsListener.createListenerEvent(); - metricsListener.beforeCreating( address, creatingEvent ); - return creatingEvent; - } - - public void channelFailedToCreate( BoltServerAddress address ) - { - metricsListener.afterFailedToCreate( address ); - } - - public void channelClosed( Channel channel ) - { - decrementIdle( channel ); - metricsListener.afterClosed( serverAddress( channel ) ); - } - - public int inUseChannelCount( BoltServerAddress address ) - { - AtomicInteger count = addressToInUseChannelCount.get( address ); - return count == null ? 
0 : count.get(); - } - - public int idleChannelCount( BoltServerAddress address ) - { - AtomicInteger count = addressToIdleChannelCount.get( address ); - return count == null ? 0 : count.get(); - } - - public void prepareToCloseChannels() - { - for ( Channel channel : allChannels ) - { - BoltProtocol protocol = BoltProtocol.forChannel( channel ); - try - { - protocol.prepareToCloseChannel( channel ); - } - catch ( Throwable e ) - { - // only logging it - log.debug( "Failed to prepare to close Channel %s due to error %s. " + - "It is safe to ignore this error as the channel will be closed despite if it is successfully prepared to close or not.", channel, e.getMessage() ); - } - } - } - - private void incrementInUse( Channel channel ) - { - increment( channel, addressToInUseChannelCount ); - } - - private void decrementInUse( Channel channel ) - { - decrement( channel, addressToInUseChannelCount ); - } - - private void incrementIdle( Channel channel ) - { - increment( channel, addressToIdleChannelCount ); - } - - private void decrementIdle( Channel channel ) - { - decrement( channel, addressToIdleChannelCount ); - } - - private void increment( Channel channel, Map countMap ) - { - BoltServerAddress address = serverAddress( channel ); - AtomicInteger count = countMap.computeIfAbsent( address, k -> new AtomicInteger() ); - count.incrementAndGet(); - } - - private void decrement( Channel channel, Map countMap ) - { - BoltServerAddress address = serverAddress( channel ); - AtomicInteger count = countMap.get( address ); - if ( count == null ) - { - throw new IllegalStateException( "No count exist for address '" + address + "'" ); - } - count.decrementAndGet(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/PoolSettings.java b/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/PoolSettings.java deleted file mode 100644 index 149ed400..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/async/pool/PoolSettings.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
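// Editor's note, not part of the original patch: a self-contained sketch of the per-address
// in-use/idle bookkeeping done by the deleted NettyChannelTracker above. Plain Strings stand in
// for BoltServerAddress; the class and method names below are illustrative, not the driver's API.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

class ChannelCountersSketch
{
    private final Map<String,AtomicInteger> inUse = new ConcurrentHashMap<>();
    private final Map<String,AtomicInteger> idle = new ConcurrentHashMap<>();

    // a brand-new channel goes straight into the in-use column (mirrors channelCreated)
    void created( String address )
    {
        increment( address, inUse );
    }

    // taken from the idle part of the pool (mirrors channelAcquired)
    void acquired( String address )
    {
        increment( address, inUse );
        decrement( address, idle );
    }

    // handed back to the pool (mirrors channelReleased)
    void released( String address )
    {
        decrement( address, inUse );
        increment( address, idle );
    }

    int inUseCount( String address )
    {
        AtomicInteger count = inUse.get( address );
        return count == null ? 0 : count.get();
    }

    int idleCount( String address )
    {
        AtomicInteger count = idle.get( address );
        return count == null ? 0 : count.get();
    }

    private static void increment( String address, Map<String,AtomicInteger> counts )
    {
        counts.computeIfAbsent( address, k -> new AtomicInteger() ).incrementAndGet();
    }

    private static void decrement( String address, Map<String,AtomicInteger> counts )
    {
        AtomicInteger count = counts.get( address );
        if ( count == null )
        {
            // as in the original tracker, decrementing an unknown address is a programming error
            throw new IllegalStateException( "No count exists for address '" + address + "'" );
        }
        count.decrementAndGet();
    }
}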
- */ -package org.neo4j.driver.internal.async.pool; - -import java.util.concurrent.TimeUnit; - -public class PoolSettings -{ - public static final int NOT_CONFIGURED = -1; - - public static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 100; - public static final long DEFAULT_IDLE_TIME_BEFORE_CONNECTION_TEST = NOT_CONFIGURED; - public static final long DEFAULT_MAX_CONNECTION_LIFETIME = TimeUnit.HOURS.toMillis( 1 ); - public static final long DEFAULT_CONNECTION_ACQUISITION_TIMEOUT = TimeUnit.SECONDS.toMillis( 60 ); - - private final int maxConnectionPoolSize; - private final long connectionAcquisitionTimeout; - private final long maxConnectionLifetime; - private final long idleTimeBeforeConnectionTest; - - public PoolSettings( int maxConnectionPoolSize, long connectionAcquisitionTimeout, - long maxConnectionLifetime, long idleTimeBeforeConnectionTest ) - { - this.maxConnectionPoolSize = maxConnectionPoolSize; - this.connectionAcquisitionTimeout = connectionAcquisitionTimeout; - this.maxConnectionLifetime = maxConnectionLifetime; - this.idleTimeBeforeConnectionTest = idleTimeBeforeConnectionTest; - } - - public long idleTimeBeforeConnectionTest() - { - return idleTimeBeforeConnectionTest; - } - - public boolean idleTimeBeforeConnectionTestEnabled() - { - return idleTimeBeforeConnectionTest >= 0; - } - - public long maxConnectionLifetime() - { - return maxConnectionLifetime; - } - - public boolean maxConnectionLifetimeEnabled() - { - return maxConnectionLifetime > 0; - } - - public int maxConnectionPoolSize() - { - return maxConnectionPoolSize; - } - - public long connectionAcquisitionTimeout() - { - return connectionAcquisitionTimeout; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/AddressSet.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/AddressSet.java deleted file mode 100644 index 6914f107..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/AddressSet.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
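// Editor's note, not part of the original patch: a tiny sketch of the sentinel convention used by
// the deleted PoolSettings above, where a negative idle-test value (NOT_CONFIGURED) disables the
// idle ping and a non-positive lifetime disables the age check. The class name is illustrative.
class PoolSettingsSentinelSketch
{
    static final int NOT_CONFIGURED = -1;

    public static void main( String[] args )
    {
        long idleTimeBeforeConnectionTest = NOT_CONFIGURED;           // default: never ping idle connections
        long maxConnectionLifetime = 60 * 60 * 1000L;                 // default: one hour

        boolean idleTestEnabled = idleTimeBeforeConnectionTest >= 0;  // idleTimeBeforeConnectionTestEnabled()
        boolean lifetimeEnabled = maxConnectionLifetime > 0;          // maxConnectionLifetimeEnabled()

        System.out.println( "idle test enabled: " + idleTestEnabled ); // false
        System.out.println( "lifetime enabled: " + lifetimeEnabled );  // true
    }
}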
- */ -package org.neo4j.driver.internal.cluster; - -import java.util.Arrays; -import java.util.Set; - -import org.neo4j.driver.internal.BoltServerAddress; - -public class AddressSet -{ - private static final BoltServerAddress[] NONE = {}; - - private volatile BoltServerAddress[] addresses = NONE; - - public BoltServerAddress[] toArray() - { - return addresses; - } - - public int size() - { - return addresses.length; - } - - public synchronized void update( Set addresses ) - { - this.addresses = addresses.toArray( NONE ); - } - - public synchronized void remove( BoltServerAddress address ) - { - BoltServerAddress[] addresses = this.addresses; - if ( addresses != null ) - { - for ( int i = 0; i < addresses.length; i++ ) - { - if ( addresses[i].equals( address ) ) - { - if ( addresses.length == 1 ) - { - this.addresses = NONE; - return; - } - BoltServerAddress[] copy = new BoltServerAddress[addresses.length - 1]; - System.arraycopy( addresses, 0, copy, 0, i ); - System.arraycopy( addresses, i + 1, copy, i, addresses.length - i - 1 ); - this.addresses = copy; - return; - } - } - } - } - - @Override - public String toString() - { - return "AddressSet=" + Arrays.toString( addresses ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterComposition.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterComposition.java deleted file mode 100644 index 50a4feb3..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterComposition.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
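// Editor's note, not part of the original patch: a small usage sketch for the copy-on-write
// pattern used by the deleted AddressSet above. Readers take a volatile array snapshot and never
// see a half-updated set; writers synchronize and swap in a whole new array. Plain host:port
// Strings replace BoltServerAddress; the class names are illustrative.
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

class AddressSetUsageSketch
{
    public static void main( String[] args )
    {
        SimpleAddressSet routers = new SimpleAddressSet();
        routers.update( new LinkedHashSet<>( Arrays.asList( "core1:7687", "core2:7687", "core3:7687" ) ) );

        String[] snapshot = routers.toArray();      // stable snapshot for iteration
        routers.remove( "core2:7687" );             // a concurrent removal does not disturb the snapshot

        System.out.println( Arrays.toString( snapshot ) );          // [core1:7687, core2:7687, core3:7687]
        System.out.println( Arrays.toString( routers.toArray() ) ); // [core1:7687, core3:7687]
    }

    // String-based stand-in for AddressSet, reduced to the operations used above
    static class SimpleAddressSet
    {
        private static final String[] NONE = {};
        private volatile String[] addresses = NONE;

        String[] toArray() { return addresses; }

        synchronized void update( Set<String> newAddresses )
        {
            addresses = newAddresses.toArray( NONE );
        }

        synchronized void remove( String address )
        {
            String[] current = this.addresses;
            for ( int i = 0; i < current.length; i++ )
            {
                if ( current[i].equals( address ) )
                {
                    if ( current.length == 1 )
                    {
                        this.addresses = NONE;
                        return;
                    }
                    String[] copy = new String[current.length - 1];
                    System.arraycopy( current, 0, copy, 0, i );
                    System.arraycopy( current, i + 1, copy, i, current.length - i - 1 );
                    this.addresses = copy;
                    return;
                }
            }
        }
    }
}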
- */ -package org.neo4j.driver.internal.cluster; - -import java.util.LinkedHashSet; -import java.util.Objects; -import java.util.Set; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.Record; -import org.neo4j.driver.Value; -import java.util.function.Function; - -public final class ClusterComposition -{ - private static final long MAX_TTL = Long.MAX_VALUE / 1000L; - private static final Function OF_BoltServerAddress = - new Function() - { - @Override - public BoltServerAddress apply( Value value ) - { - return new BoltServerAddress( value.asString() ); - } - }; - - private final Set readers; - private final Set writers; - private final Set routers; - private final long expirationTimestamp; - - private ClusterComposition( long expirationTimestamp ) - { - this.readers = new LinkedHashSet<>(); - this.writers = new LinkedHashSet<>(); - this.routers = new LinkedHashSet<>(); - this.expirationTimestamp = expirationTimestamp; - } - - /** For testing */ - public ClusterComposition( - long expirationTimestamp, - Set readers, - Set writers, - Set routers ) - { - this( expirationTimestamp ); - this.readers.addAll( readers ); - this.writers.addAll( writers ); - this.routers.addAll( routers ); - } - - public boolean hasWriters() - { - return !writers.isEmpty(); - } - - public boolean hasRoutersAndReaders() - { - return !routers.isEmpty() && !readers.isEmpty(); - } - - public Set readers() - { - return new LinkedHashSet<>( readers ); - } - - public Set writers() - { - return new LinkedHashSet<>( writers ); - } - - public Set routers() - { - return new LinkedHashSet<>( routers ); - } - - public long expirationTimestamp() { - return this.expirationTimestamp; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - ClusterComposition that = (ClusterComposition) o; - return expirationTimestamp == that.expirationTimestamp && - Objects.equals( readers, that.readers ) && - Objects.equals( writers, that.writers ) && - Objects.equals( routers, that.routers ); - } - - @Override - public int hashCode() - { - return Objects.hash( readers, writers, routers, expirationTimestamp ); - } - - @Override - public String toString() - { - return "ClusterComposition{" + - "readers=" + readers + - ", writers=" + writers + - ", routers=" + routers + - ", expirationTimestamp=" + expirationTimestamp + - '}'; - } - - public static ClusterComposition parse( Record record, long now ) - { - if ( record == null ) - { - return null; - } - - final ClusterComposition result = new ClusterComposition( expirationTimestamp( now, record ) ); - record.get( "servers" ).asList( new Function() - { - @Override - public Void apply( Value value ) - { - result.servers( value.get( "role" ).asString() ) - .addAll( value.get( "addresses" ).asList( OF_BoltServerAddress ) ); - return null; - } - } ); - return result; - } - - private static long expirationTimestamp( long now, Record record ) - { - long ttl = record.get( "ttl" ).asLong(); - long expirationTimestamp = now + ttl * 1000; - if ( ttl < 0 || ttl >= MAX_TTL || expirationTimestamp < 0 ) - { - expirationTimestamp = Long.MAX_VALUE; - } - return expirationTimestamp; - } - - private Set servers( String role ) - { - switch ( role ) - { - case "READ": - return readers; - case "WRITE": - return writers; - case "ROUTE": - return routers; - default: - throw new IllegalArgumentException( "invalid server role: " + role ); - } - } -} diff --git 
a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterCompositionProvider.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterCompositionProvider.java deleted file mode 100644 index 65a5c896..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterCompositionProvider.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.spi.Connection; - -public interface ClusterCompositionProvider -{ - CompletionStage getClusterComposition( - CompletionStage connectionStage ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterCompositionResponse.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterCompositionResponse.java deleted file mode 100644 index 461e52cf..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterCompositionResponse.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -public interface ClusterCompositionResponse -{ - ClusterComposition clusterComposition(); - - class Failure implements ClusterCompositionResponse - { - private final RuntimeException error; - - public Failure( RuntimeException t ) - { - this.error = t; - } - - @Override - public ClusterComposition clusterComposition() - { - throw this.error; - } - } - - class Success implements ClusterCompositionResponse - { - private final ClusterComposition cluster; - - public Success( ClusterComposition cluster ) - { - this.cluster = cluster; - } - - @Override - public ClusterComposition clusterComposition() - { - return cluster; - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterRoutingTable.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterRoutingTable.java deleted file mode 100644 index a51141bb..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/ClusterRoutingTable.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
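// Editor's note, not part of the original patch: a sketch of how the deleted ClusterComposition
// above turns the routing procedure's "ttl" value (seconds) into an absolute expiration timestamp,
// clamping to "never expires" on negative, oversized, or overflowing values. The class name is
// illustrative.
class ClusterCompositionTtlSketch
{
    private static final long MAX_TTL = Long.MAX_VALUE / 1000L;

    static long expirationTimestamp( long nowMillis, long ttlSeconds )
    {
        long expiration = nowMillis + ttlSeconds * 1000;
        if ( ttlSeconds < 0 || ttlSeconds >= MAX_TTL || expiration < 0 )
        {
            return Long.MAX_VALUE;   // treat suspicious TTLs as "routing table never expires"
        }
        return expiration;
    }

    public static void main( String[] args )
    {
        long now = 1_000_000L;
        System.out.println( expirationTimestamp( now, 300 ) );            // 1 300 000 (now + 300 s)
        System.out.println( expirationTimestamp( now, -1 ) );             // Long.MAX_VALUE
        System.out.println( expirationTimestamp( now, Long.MAX_VALUE ) ); // Long.MAX_VALUE (overflow guard)
    }
}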
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.Set; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.AccessMode; - -import static java.lang.String.format; -import static java.util.Arrays.asList; - -public class ClusterRoutingTable implements RoutingTable -{ - private static final int MIN_ROUTERS = 1; - - private final Clock clock; - private volatile long expirationTimeout; - private final AddressSet readers; - private final AddressSet writers; - private final AddressSet routers; - - public ClusterRoutingTable( Clock clock, BoltServerAddress... routingAddresses ) - { - this( clock ); - routers.update( new LinkedHashSet<>( asList( routingAddresses ) ) ); - } - - private ClusterRoutingTable( Clock clock ) - { - this.clock = clock; - this.expirationTimeout = clock.millis() - 1; - - this.readers = new AddressSet(); - this.writers = new AddressSet(); - this.routers = new AddressSet(); - } - - @Override - public boolean isStaleFor( AccessMode mode ) - { - return expirationTimeout < clock.millis() || - routers.size() < MIN_ROUTERS || - mode == AccessMode.READ && readers.size() == 0 || - mode == AccessMode.WRITE && writers.size() == 0; - } - - @Override - public synchronized void update( ClusterComposition cluster ) - { - expirationTimeout = cluster.expirationTimestamp(); - readers.update( cluster.readers() ); - writers.update( cluster.writers() ); - routers.update( cluster.routers() ); - } - - @Override - public synchronized void forget( BoltServerAddress address ) - { - routers.remove( address ); - readers.remove( address ); - writers.remove( address ); - } - - @Override - public AddressSet readers() - { - return readers; - } - - @Override - public AddressSet writers() - { - return writers; - } - - @Override - public AddressSet routers() - { - return routers; - } - - @Override - public Set servers() - { - Set servers = new HashSet<>(); - Collections.addAll( servers, readers.toArray() ); - Collections.addAll( servers, writers.toArray() ); - Collections.addAll( servers, routers.toArray() ); - return servers; - } - - @Override - public void removeWriter( BoltServerAddress toRemove ) - { - writers.remove( toRemove ); - } - - - @Override - public synchronized String toString() - { - return format( "Ttl %s, currentTime %s, routers %s, writers %s, readers %s", - expirationTimeout, clock.millis(), routers, writers, readers ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/DnsResolver.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/DnsResolver.java deleted file mode 100644 index 8c0c877c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/DnsResolver.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of 
Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Set; -import java.util.stream.Stream; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.net.ServerAddress; -import org.neo4j.driver.net.ServerAddressResolver; - -import static java.util.Collections.singleton; -import static java.util.stream.Collectors.toSet; - -public class DnsResolver implements ServerAddressResolver -{ - private final Logger logger; - - public DnsResolver( Logging logging ) - { - this.logger = logging.getLog( DnsResolver.class.getSimpleName() ); - } - - @Override - public Set resolve( ServerAddress initialRouter ) - { - try - { - return Stream.of( InetAddress.getAllByName( initialRouter.host() ) ) - .map( address -> new BoltServerAddress( initialRouter.host(), address.getHostAddress(), initialRouter.port() ) ) - .collect( toSet() ); - } - catch ( UnknownHostException e ) - { - logger.error( "Failed to resolve address `" + initialRouter + "` to IPs due to error: " + e.getMessage(), e ); - return singleton( initialRouter ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/Rediscovery.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/Rediscovery.java deleted file mode 100644 index 8f315edd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/Rediscovery.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
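// Editor's note, not part of the original patch: a self-contained sketch of the DNS expansion
// performed by the deleted DnsResolver above, with plain host:port Strings in place of
// BoltServerAddress and ServerAddress. The class name is illustrative.
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Set;
import java.util.stream.Stream;
import static java.util.Collections.singleton;
import static java.util.stream.Collectors.toSet;

class DnsResolverSketch
{
    static Set<String> resolve( String host, int port )
    {
        try
        {
            // one logical router name may resolve to several A/AAAA records; each becomes a candidate
            return Stream.of( InetAddress.getAllByName( host ) )
                    .map( address -> address.getHostAddress() + ":" + port )
                    .collect( toSet() );
        }
        catch ( UnknownHostException e )
        {
            // resolution failure is not fatal: fall back to the unresolved address and let the
            // connection attempt surface the real error
            return singleton( host + ":" + port );
        }
    }

    public static void main( String[] args )
    {
        System.out.println( resolve( "localhost", 7687 ) );
    }
}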
- */ -package org.neo4j.driver.internal.cluster; - -import io.netty.util.concurrent.EventExecutorGroup; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.Logger; -import org.neo4j.driver.exceptions.SecurityException; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.net.ServerAddressResolver; - -import static java.lang.String.format; -import static java.util.Collections.emptySet; -import static java.util.concurrent.CompletableFuture.completedFuture; -import static java.util.stream.Collectors.toList; -import static org.neo4j.driver.internal.util.Futures.completedWithNull; -import static org.neo4j.driver.internal.util.Futures.failedFuture; - -public class Rediscovery -{ - private static final String NO_ROUTERS_AVAILABLE = "Could not perform discovery. No routing servers available."; - - private final BoltServerAddress initialRouter; - private final RoutingSettings settings; - private final Logger logger; - private final ClusterCompositionProvider provider; - private final ServerAddressResolver resolver; - private final EventExecutorGroup eventExecutorGroup; - - private volatile boolean useInitialRouter; - - public Rediscovery( BoltServerAddress initialRouter, RoutingSettings settings, ClusterCompositionProvider provider, - EventExecutorGroup eventExecutorGroup, ServerAddressResolver resolver, Logger logger ) - { - this( initialRouter, settings, provider, resolver, eventExecutorGroup, logger, true ); - } - - // Test-only constructor - Rediscovery( BoltServerAddress initialRouter, RoutingSettings settings, ClusterCompositionProvider provider, - ServerAddressResolver resolver, EventExecutorGroup eventExecutorGroup, Logger logger, boolean useInitialRouter ) - { - this.initialRouter = initialRouter; - this.settings = settings; - this.logger = logger; - this.provider = provider; - this.resolver = resolver; - this.eventExecutorGroup = eventExecutorGroup; - this.useInitialRouter = useInitialRouter; - } - - /** - * Given the current routing table and connection pool, use the connection composition provider to fetch a new - * cluster composition, which would be used to update the routing table and connection pool. - * - * @param routingTable current routing table. - * @param connectionPool connection pool. - * @return new cluster composition. 
- */ - public CompletionStage lookupClusterComposition( RoutingTable routingTable, - ConnectionPool connectionPool ) - { - CompletableFuture result = new CompletableFuture<>(); - lookupClusterComposition( routingTable, connectionPool, 0, 0, result ); - return result; - } - - private void lookupClusterComposition( RoutingTable routingTable, ConnectionPool pool, - int failures, long previousDelay, CompletableFuture result ) - { - lookup( routingTable, pool ).whenComplete( ( composition, completionError ) -> - { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - result.completeExceptionally( error ); - } - else if ( composition != null ) - { - result.complete( composition ); - } - else - { - int newFailures = failures + 1; - if ( newFailures >= settings.maxRoutingFailures() ) - { - result.completeExceptionally( new ServiceUnavailableException( NO_ROUTERS_AVAILABLE ) ); - } - else - { - long nextDelay = Math.max( settings.retryTimeoutDelay(), previousDelay * 2 ); - logger.info( "Unable to fetch new routing table, will try again in " + nextDelay + "ms" ); - eventExecutorGroup.next().schedule( - () -> lookupClusterComposition( routingTable, pool, newFailures, nextDelay, result ), - nextDelay, TimeUnit.MILLISECONDS - ); - } - } - } ); - } - - private CompletionStage lookup( RoutingTable routingTable, ConnectionPool connectionPool ) - { - CompletionStage compositionStage; - - if ( useInitialRouter ) - { - compositionStage = lookupOnInitialRouterThenOnKnownRouters( routingTable, connectionPool ); - useInitialRouter = false; - } - else - { - compositionStage = lookupOnKnownRoutersThenOnInitialRouter( routingTable, connectionPool ); - } - - return compositionStage.whenComplete( ( composition, error ) -> - { - if ( composition != null && !composition.hasWriters() ) - { - useInitialRouter = true; - } - } ); - } - - private CompletionStage lookupOnKnownRoutersThenOnInitialRouter( RoutingTable routingTable, - ConnectionPool connectionPool ) - { - Set seenServers = new HashSet<>(); - return lookupOnKnownRouters( routingTable, connectionPool, seenServers ).thenCompose( composition -> - { - if ( composition != null ) - { - return completedFuture( composition ); - } - return lookupOnInitialRouter( routingTable, connectionPool, seenServers ); - } ); - } - - private CompletionStage lookupOnInitialRouterThenOnKnownRouters( RoutingTable routingTable, - ConnectionPool connectionPool ) - { - Set seenServers = emptySet(); - return lookupOnInitialRouter( routingTable, connectionPool, seenServers ).thenCompose( composition -> - { - if ( composition != null ) - { - return completedFuture( composition ); - } - return lookupOnKnownRouters( routingTable, connectionPool, new HashSet<>() ); - } ); - } - - private CompletionStage lookupOnKnownRouters( RoutingTable routingTable, - ConnectionPool connectionPool, Set seenServers ) - { - BoltServerAddress[] addresses = routingTable.routers().toArray(); - - CompletableFuture result = completedWithNull(); - for ( BoltServerAddress address : addresses ) - { - result = result.thenCompose( composition -> - { - if ( composition != null ) - { - return completedFuture( composition ); - } - else - { - return lookupOnRouter( address, routingTable, connectionPool ) - .whenComplete( ( ignore, error ) -> seenServers.add( address ) ); - } - } ); - } - return result; - } - - private CompletionStage lookupOnInitialRouter( RoutingTable routingTable, - ConnectionPool connectionPool, Set seenServers ) - { - List addresses; - try - { - addresses = 
resolve( initialRouter ); - } - catch ( Throwable error ) - { - return failedFuture( error ); - } - addresses.removeAll( seenServers ); - - CompletableFuture result = completedWithNull(); - for ( BoltServerAddress address : addresses ) - { - result = result.thenCompose( composition -> - { - if ( composition != null ) - { - return completedFuture( composition ); - } - return lookupOnRouter( address, routingTable, connectionPool ); - } ); - } - return result; - } - - private CompletionStage lookupOnRouter( BoltServerAddress routerAddress, - RoutingTable routingTable, ConnectionPool connectionPool ) - { - CompletionStage connectionStage = connectionPool.acquire( routerAddress ); - - return provider.getClusterComposition( connectionStage ).handle( ( response, error ) -> - { - Throwable cause = Futures.completionExceptionCause( error ); - if ( cause != null ) - { - return handleRoutingProcedureError( cause, routingTable, routerAddress ); - } - else - { - return response.clusterComposition(); - } - } ); - } - - private ClusterComposition handleRoutingProcedureError( Throwable error, RoutingTable routingTable, - BoltServerAddress routerAddress ) - { - if ( error instanceof SecurityException ) - { - // auth error happened, terminate the discovery procedure immediately - throw new CompletionException( error ); - } - else - { - // connection turned out to be broken - logger.error( format( "Failed to connect to routing server '%s'.", routerAddress ), error ); - routingTable.forget( routerAddress ); - return null; - } - } - - private List resolve( BoltServerAddress address ) - { - return resolver.resolve( address ) - .stream() - .map( BoltServerAddress::from ) - .collect( toList() ); // collect to list to preserve the order - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingContext.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingContext.java deleted file mode 100644 index 67e48618..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingContext.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
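// Editor's note, not part of the original patch: a sketch of the retry schedule used by the
// deleted Rediscovery above. Each failed routing-table lookup doubles the previous delay, floored
// at the configured retry delay, until the failure limit is reached. The limit of 6 is assumed
// for illustration only (RoutingSettings.DEFAULT uses a single attempt).
class RediscoveryRetrySketch
{
    public static void main( String[] args )
    {
        long retryTimeoutDelay = 5_000;   // RoutingSettings.retryTimeoutDelay(), 5 s in RoutingSettings.DEFAULT
        int maxRoutingFailures = 6;       // assumed limit for illustration

        long previousDelay = 0;
        for ( int failures = 1; failures <= maxRoutingFailures; failures++ )
        {
            if ( failures >= maxRoutingFailures )
            {
                System.out.println( "giving up: no routing servers available" );
                break;
            }
            long nextDelay = Math.max( retryTimeoutDelay, previousDelay * 2 );
            System.out.println( "retry #" + failures + " scheduled in " + nextDelay + " ms" );
            previousDelay = nextDelay;
        }
        // prints delays of 5000, 10000, 20000, 40000, 80000 ms, then gives up
    }
}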
- */ -package org.neo4j.driver.internal.cluster; - -import java.net.URI; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.unmodifiableMap; - -public class RoutingContext -{ - public static final RoutingContext EMPTY = new RoutingContext(); - - private final Map context; - - private RoutingContext() - { - this.context = emptyMap(); - } - - public RoutingContext( URI uri ) - { - this.context = unmodifiableMap( parseParameters( uri ) ); - } - - public boolean isDefined() - { - return !context.isEmpty(); - } - - public Map asMap() - { - return context; - } - - @Override - public String toString() - { - return "RoutingContext" + context; - } - - private static Map parseParameters( URI uri ) - { - String query = uri.getQuery(); - - if ( query == null || query.isEmpty() ) - { - return emptyMap(); - } - - Map parameters = new HashMap<>(); - String[] pairs = query.split( "&" ); - for ( String pair : pairs ) - { - String[] keyValue = pair.split( "=" ); - if ( keyValue.length != 2 ) - { - throw new IllegalArgumentException( - "Invalid parameters: '" + pair + "' in URI '" + uri + "'" ); - } - - String key = trimAndVerify( keyValue[0], "key", uri ); - String value = trimAndVerify( keyValue[1], "value", uri ); - - String previousValue = parameters.put( key, value ); - if ( previousValue != null ) - { - throw new IllegalArgumentException( - "Duplicated query parameters with key '" + key + "' in URI '" + uri + "'" ); - } - } - return parameters; - } - - private static String trimAndVerify( String string, String name, URI uri ) - { - String result = string.trim(); - if ( result.isEmpty() ) - { - throw new IllegalArgumentException( "Illegal empty " + name + " in URI query '" + uri + "'" ); - } - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureClusterCompositionProvider.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureClusterCompositionProvider.java deleted file mode 100644 index 260fad0f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureClusterCompositionProvider.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
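// Editor's note, not part of the original patch: a usage sketch for the query-string parsing done
// by the deleted RoutingContext above. The routing context is everything after '?' in the routing
// URI, as key=value pairs; malformed pairs and duplicate keys are rejected. The URI and class name
// below are examples, not values from the patch.
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

class RoutingContextUsageSketch
{
    public static void main( String[] args )
    {
        URI uri = URI.create( "bolt+routing://core1:7687?policy=eu_west&region=eu" );
        System.out.println( parse( uri ) );   // prints the two parameters, e.g. {policy=eu_west, region=eu}
    }

    static Map<String,String> parse( URI uri )
    {
        Map<String,String> parameters = new HashMap<>();
        String query = uri.getQuery();
        if ( query == null || query.isEmpty() )
        {
            return parameters;
        }
        for ( String pair : query.split( "&" ) )
        {
            String[] keyValue = pair.split( "=" );
            if ( keyValue.length != 2 )
            {
                throw new IllegalArgumentException( "Invalid parameters: '" + pair + "' in URI '" + uri + "'" );
            }
            if ( parameters.put( keyValue[0].trim(), keyValue[1].trim() ) != null )
            {
                throw new IllegalArgumentException( "Duplicated query parameters with key '" + keyValue[0] + "'" );
            }
        }
        return parameters;
    }
}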
- */ -package org.neo4j.driver.internal.cluster; - -import java.util.List; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.exceptions.ProtocolException; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.exceptions.value.ValueException; - -import static java.lang.String.format; - -public class RoutingProcedureClusterCompositionProvider implements ClusterCompositionProvider -{ - private static final String PROTOCOL_ERROR_MESSAGE = "Failed to parse '%s' result received from server due to "; - - private final Clock clock; - private final RoutingProcedureRunner routingProcedureRunner; - - public RoutingProcedureClusterCompositionProvider( Clock clock, RoutingSettings settings ) - { - this( clock, new RoutingProcedureRunner( settings.routingContext() ) ); - } - - RoutingProcedureClusterCompositionProvider( Clock clock, RoutingProcedureRunner routingProcedureRunner ) - { - this.clock = clock; - this.routingProcedureRunner = routingProcedureRunner; - } - - @Override - public CompletionStage getClusterComposition( - CompletionStage connectionStage ) - { - return routingProcedureRunner.run( connectionStage ) - .thenApply( this::processRoutingResponse ); - } - - private ClusterCompositionResponse processRoutingResponse( RoutingProcedureResponse response ) - { - if ( !response.isSuccess() ) - { - return new ClusterCompositionResponse.Failure( new ServiceUnavailableException( format( - "Failed to run '%s' on server. " + - "Please make sure that there is a Neo4j 3.1+ causal cluster up running.", - invokedProcedureString( response ) ), response.error() - ) ); - } - - List records = response.records(); - - long now = clock.millis(); - - // the record size is wrong - if ( records.size() != 1 ) - { - return new ClusterCompositionResponse.Failure( new ProtocolException( format( - PROTOCOL_ERROR_MESSAGE + "records received '%s' is too few or too many.", - invokedProcedureString( response ), records.size() ) ) ); - } - - // failed to parse the record - ClusterComposition cluster; - try - { - cluster = ClusterComposition.parse( records.get( 0 ), now ); - } - catch ( ValueException e ) - { - return new ClusterCompositionResponse.Failure( new ProtocolException( format( - PROTOCOL_ERROR_MESSAGE + "unparsable record received.", - invokedProcedureString( response ) ), e ) ); - } - - // the cluster result is not a legal reply - if ( !cluster.hasRoutersAndReaders() ) - { - return new ClusterCompositionResponse.Failure( new ProtocolException( format( - PROTOCOL_ERROR_MESSAGE + "no router or reader found in response.", - invokedProcedureString( response ) ) ) ); - } - - // all good - return new ClusterCompositionResponse.Success( cluster ); - } - - private static String invokedProcedureString( RoutingProcedureResponse response ) - { - Statement statement = response.procedure(); - return statement.text() + " " + statement.parameters(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureResponse.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureResponse.java deleted file mode 100644 index 8997ad13..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureResponse.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file 
is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -import java.util.List; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; - -public class RoutingProcedureResponse -{ - private final Statement procedure; - private final List records; - private final Throwable error; - - public RoutingProcedureResponse( Statement procedure, List records ) - { - this( procedure, records, null ); - } - - public RoutingProcedureResponse( Statement procedure, Throwable error ) - { - this( procedure, null, error ); - } - - private RoutingProcedureResponse( Statement procedure, List records, Throwable error ) - { - this.procedure = procedure; - this.records = records; - this.error = error; - } - - public boolean isSuccess() - { - return records != null; - } - - public Statement procedure() - { - return procedure; - } - - public List records() - { - if ( !isSuccess() ) - { - throw new IllegalStateException( "Can't access records of a failed result", error ); - } - return records; - } - - public Throwable error() - { - if ( isSuccess() ) - { - throw new IllegalStateException( "Can't access error of a succeeded result " + records ); - } - return error; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureRunner.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureRunner.java deleted file mode 100644 index db688e99..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingProcedureRunner.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.cluster; - -import java.util.List; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.async.connection.DecoratedConnection; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.internal.util.ServerVersion; -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.async.StatementResultCursor; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.exceptions.ClientException; - -import static org.neo4j.driver.internal.messaging.request.MultiDatabaseUtil.ABSENT_DB_NAME; -import static org.neo4j.driver.internal.util.ServerVersion.v3_2_0; -import static org.neo4j.driver.Values.parameters; - -public class RoutingProcedureRunner -{ - static final String GET_SERVERS = "dbms.cluster.routing.getServers"; - static final String GET_ROUTING_TABLE_PARAM = "context"; - static final String GET_ROUTING_TABLE = "dbms.cluster.routing.getRoutingTable({" + GET_ROUTING_TABLE_PARAM + "})"; - - private final RoutingContext context; - - public RoutingProcedureRunner( RoutingContext context ) - { - this.context = context; - } - - public CompletionStage run( CompletionStage connectionStage ) - { - return connectionStage.thenCompose( connection -> - { - // Routing procedure will be called on the default database - DecoratedConnection delegate = new DecoratedConnection( connection, ABSENT_DB_NAME, AccessMode.WRITE ); - Statement procedure = procedureStatement( delegate.serverVersion() ); - return runProcedure( delegate, procedure ) - .thenCompose( records -> releaseConnection( delegate, records ) ) - .handle( ( records, error ) -> processProcedureResponse( procedure, records, error ) ); - } ); - } - - CompletionStage> runProcedure( Connection connection, Statement procedure ) - { - return connection.protocol() - .runInAutoCommitTransaction( connection, procedure, BookmarksHolder.NO_OP, TransactionConfig.empty(), true ) - .asyncResult().thenCompose( StatementResultCursor::listAsync ); - } - - private Statement procedureStatement( ServerVersion serverVersion ) - { - if ( serverVersion.greaterThanOrEqual( v3_2_0 ) ) - { - return new Statement( "CALL " + GET_ROUTING_TABLE, - parameters( GET_ROUTING_TABLE_PARAM, context.asMap() ) ); - } - else - { - return new Statement( "CALL " + GET_SERVERS ); - } - } - - private CompletionStage> releaseConnection( Connection connection, List records ) - { - // It is not strictly required to release connection after routing procedure invocation because it'll - // be released by the PULL_ALL response handler after result is fully fetched. Such release will happen - // in background. However, releasing it early as part of whole chain makes it easier to reason about - // rediscovery in stub server tests. Some of them assume connections to instances not present in new - // routing table will be closed immediately. 
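// Editor's note, not part of the original patch: a sketch of the version switch made by the
// deleted RoutingProcedureRunner above when it builds the routing query. Server versions are
// plain major/minor ints here instead of the driver's ServerVersion type; the class name is
// illustrative.
class RoutingProcedureChoiceSketch
{
    static String routingQuery( int major, int minor )
    {
        boolean atLeast3_2 = major > 3 || ( major == 3 && minor >= 2 );
        if ( atLeast3_2 )
        {
            // newer servers accept the routing context as a procedure parameter
            return "CALL dbms.cluster.routing.getRoutingTable({context})";
        }
        // older servers only expose the parameterless getServers procedure
        return "CALL dbms.cluster.routing.getServers";
    }

    public static void main( String[] args )
    {
        System.out.println( routingQuery( 3, 1 ) );   // getServers
        System.out.println( routingQuery( 3, 5 ) );   // getRoutingTable({context})
        System.out.println( routingQuery( 4, 0 ) );   // getRoutingTable({context})
    }
}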
- return connection.release().thenApply( ignore -> records ); - } - - private RoutingProcedureResponse processProcedureResponse( Statement procedure, List records, - Throwable error ) - { - Throwable cause = Futures.completionExceptionCause( error ); - if ( cause != null ) - { - return handleError( procedure, cause ); - } - else - { - return new RoutingProcedureResponse( procedure, records ); - } - } - - private RoutingProcedureResponse handleError( Statement procedure, Throwable error ) - { - if ( error instanceof ClientException ) - { - return new RoutingProcedureResponse( procedure, error ); - } - else - { - throw new CompletionException( error ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingSettings.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingSettings.java deleted file mode 100644 index e5515a1d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingSettings.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -import static java.util.concurrent.TimeUnit.SECONDS; - -public class RoutingSettings -{ - public static final RoutingSettings DEFAULT = new RoutingSettings( 1, SECONDS.toMillis( 5 ) ); - - private final int maxRoutingFailures; - private final long retryTimeoutDelay; - private final RoutingContext routingContext; - - public RoutingSettings( int maxRoutingFailures, long retryTimeoutDelay ) - { - this( maxRoutingFailures, retryTimeoutDelay, RoutingContext.EMPTY ); - } - - public RoutingSettings( int maxRoutingFailures, long retryTimeoutDelay, RoutingContext routingContext ) - { - this.maxRoutingFailures = maxRoutingFailures; - this.retryTimeoutDelay = retryTimeoutDelay; - this.routingContext = routingContext; - } - - public RoutingSettings withRoutingContext( RoutingContext newRoutingContext ) - { - return new RoutingSettings( maxRoutingFailures, retryTimeoutDelay, newRoutingContext ); - } - - public int maxRoutingFailures() - { - return maxRoutingFailures; - } - - public long retryTimeoutDelay() - { - return retryTimeoutDelay; - } - - public RoutingContext routingContext() - { - return routingContext; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingTable.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingTable.java deleted file mode 100644 index 91cff535..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/RoutingTable.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster; - -import java.util.Set; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.AccessMode; - -public interface RoutingTable -{ - boolean isStaleFor( AccessMode mode ); - - void update( ClusterComposition cluster ); - - void forget( BoltServerAddress address ); - - AddressSet readers(); - - AddressSet writers(); - - AddressSet routers(); - - Set servers(); - - void removeWriter( BoltServerAddress toRemove ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LeastConnectedLoadBalancingStrategy.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LeastConnectedLoadBalancingStrategy.java deleted file mode 100644 index 679da606..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LeastConnectedLoadBalancingStrategy.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster.loadbalancing; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -/** - * Load balancing strategy that finds server with least amount of active (checked out of the pool) connections from - * given readers or writers. It finds a start index for iteration in a round-robin fashion. This is done to prevent - * choosing same first address over and over when all addresses have same amount of active connections. 
- */ -public class LeastConnectedLoadBalancingStrategy implements LoadBalancingStrategy -{ - private static final String LOGGER_NAME = LeastConnectedLoadBalancingStrategy.class.getSimpleName(); - - private final RoundRobinArrayIndex readersIndex = new RoundRobinArrayIndex(); - private final RoundRobinArrayIndex writersIndex = new RoundRobinArrayIndex(); - - private final ConnectionPool connectionPool; - private final Logger log; - - public LeastConnectedLoadBalancingStrategy( ConnectionPool connectionPool, Logging logging ) - { - this.connectionPool = connectionPool; - this.log = logging.getLog( LOGGER_NAME ); - } - - @Override - public BoltServerAddress selectReader( BoltServerAddress[] knownReaders ) - { - return select( knownReaders, readersIndex, "reader" ); - } - - @Override - public BoltServerAddress selectWriter( BoltServerAddress[] knownWriters ) - { - return select( knownWriters, writersIndex, "writer" ); - } - - private BoltServerAddress select( BoltServerAddress[] addresses, RoundRobinArrayIndex addressesIndex, - String addressType ) - { - int size = addresses.length; - if ( size == 0 ) - { - log.trace( "Unable to select %s, no known addresses given", addressType ); - return null; - } - - // choose start index for iteration in round-robin fashion - int startIndex = addressesIndex.next( size ); - int index = startIndex; - - BoltServerAddress leastConnectedAddress = null; - int leastActiveConnections = Integer.MAX_VALUE; - - // iterate over the array to find least connected address - do - { - BoltServerAddress address = addresses[index]; - int activeConnections = connectionPool.inUseConnections( address ); - - if ( activeConnections < leastActiveConnections ) - { - leastConnectedAddress = address; - leastActiveConnections = activeConnections; - } - - // loop over to the start of the array when end is reached - if ( index == size - 1 ) - { - index = 0; - } - else - { - index++; - } - } - while ( index != startIndex ); - - log.trace( "Selected %s with address: '%s' and active connections: %s", - addressType, leastConnectedAddress, leastActiveConnections ); - - return leastConnectedAddress; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LoadBalancer.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LoadBalancer.java deleted file mode 100644 index 93adf8fd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LoadBalancer.java +++ /dev/null @@ -1,285 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
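// Editor's note, not part of the original patch: a self-contained sketch of the selection loop in
// the deleted LeastConnectedLoadBalancingStrategy above. In-use counts come from a caller-supplied
// function instead of the ConnectionPool, addresses are plain Strings, and the class name is
// illustrative. The round-robin start index prevents always probing index 0 first when counts tie.
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.ToIntFunction;

class LeastConnectedSelectionSketch
{
    private final AtomicInteger offset = new AtomicInteger();

    String select( String[] addresses, ToIntFunction<String> inUseConnections )
    {
        if ( addresses.length == 0 )
        {
            return null;   // nothing known for this role
        }

        // round-robin start index, wrapped to the array size (floorMod also handles overflow)
        int startIndex = Math.floorMod( offset.getAndIncrement(), addresses.length );
        int index = startIndex;

        String leastConnected = null;
        int leastActive = Integer.MAX_VALUE;
        do
        {
            int active = inUseConnections.applyAsInt( addresses[index] );
            if ( active < leastActive )
            {
                leastConnected = addresses[index];
                leastActive = active;
            }
            index = ( index + 1 ) % addresses.length;   // wrap around until we are back at the start
        }
        while ( index != startIndex );

        return leastConnected;
    }

    public static void main( String[] args )
    {
        Map<String,Integer> counts = Map.of( "replica1:7687", 3, "replica2:7687", 1, "replica3:7687", 2 );
        LeastConnectedSelectionSketch strategy = new LeastConnectedSelectionSketch();
        System.out.println( strategy.select( counts.keySet().toArray( new String[0] ), counts::get ) ); // replica2:7687
    }
}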
- */ -package org.neo4j.driver.internal.cluster.loadbalancing; - -import io.netty.util.concurrent.EventExecutorGroup; - -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.RoutingErrorHandler; -import org.neo4j.driver.internal.async.connection.DecoratedConnection; -import org.neo4j.driver.internal.async.connection.RoutingConnection; -import org.neo4j.driver.internal.cluster.AddressSet; -import org.neo4j.driver.internal.cluster.ClusterComposition; -import org.neo4j.driver.internal.cluster.ClusterCompositionProvider; -import org.neo4j.driver.internal.cluster.ClusterRoutingTable; -import org.neo4j.driver.internal.cluster.Rediscovery; -import org.neo4j.driver.internal.cluster.RoutingProcedureClusterCompositionProvider; -import org.neo4j.driver.internal.cluster.RoutingSettings; -import org.neo4j.driver.internal.cluster.RoutingTable; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.internal.spi.ConnectionProvider; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.exceptions.SessionExpiredException; -import org.neo4j.driver.net.ServerAddressResolver; - -import static java.util.concurrent.CompletableFuture.completedFuture; - -public class LoadBalancer implements ConnectionProvider, RoutingErrorHandler -{ - private static final String LOAD_BALANCER_LOG_NAME = "LoadBalancer"; - - private final ConnectionPool connectionPool; - private final RoutingTable routingTable; - private final Rediscovery rediscovery; - private final LoadBalancingStrategy loadBalancingStrategy; - private final EventExecutorGroup eventExecutorGroup; - private final Logger log; - - private CompletableFuture refreshRoutingTableFuture; - - public LoadBalancer( BoltServerAddress initialRouter, RoutingSettings settings, ConnectionPool connectionPool, - EventExecutorGroup eventExecutorGroup, Clock clock, Logging logging, - LoadBalancingStrategy loadBalancingStrategy, ServerAddressResolver resolver ) - { - this( connectionPool, new ClusterRoutingTable( clock, initialRouter ), - createRediscovery( initialRouter, settings, eventExecutorGroup, resolver, clock, logging ), - loadBalancerLogger( logging ), loadBalancingStrategy, eventExecutorGroup ); - } - - // Used only in testing - LoadBalancer( ConnectionPool connectionPool, RoutingTable routingTable, Rediscovery rediscovery, - EventExecutorGroup eventExecutorGroup, Logging logging ) - { - this( connectionPool, routingTable, rediscovery, loadBalancerLogger( logging ), - new LeastConnectedLoadBalancingStrategy( connectionPool, logging ), - eventExecutorGroup ); - } - - private LoadBalancer( ConnectionPool connectionPool, RoutingTable routingTable, Rediscovery rediscovery, - Logger log, LoadBalancingStrategy loadBalancingStrategy, EventExecutorGroup eventExecutorGroup ) - { - this.connectionPool = connectionPool; - this.routingTable = routingTable; - this.rediscovery = rediscovery; - this.loadBalancingStrategy = loadBalancingStrategy; - this.eventExecutorGroup = eventExecutorGroup; - this.log = log; - } - - @Override - public CompletionStage acquireConnection( String databaseName, AccessMode mode ) - { - return freshRoutingTable( mode ) - 
.thenCompose( routingTable -> acquire( mode, routingTable ) ) - .thenApply( connection -> new RoutingConnection( connection, mode, this ) ) - .thenApply( connection -> new DecoratedConnection( connection, databaseName, mode ) ); - } - - @Override - public CompletionStage verifyConnectivity() - { - return freshRoutingTable( AccessMode.READ ).thenApply( routingTable -> null ); - } - - @Override - public void onConnectionFailure( BoltServerAddress address ) - { - forget( address ); - } - - @Override - public void onWriteFailure( BoltServerAddress address ) - { - routingTable.removeWriter( address ); - } - - @Override - public CompletionStage close() - { - return connectionPool.close(); - } - - private synchronized void forget( BoltServerAddress address ) - { - // remove from the routing table, to prevent concurrent threads from making connections to this address - routingTable.forget( address ); - } - - private synchronized CompletionStage freshRoutingTable( AccessMode mode ) - { - if ( refreshRoutingTableFuture != null ) - { - // refresh is already happening concurrently, just use it's result - return refreshRoutingTableFuture; - } - else if ( routingTable.isStaleFor( mode ) ) - { - // existing routing table is not fresh and should be updated - log.info( "Routing table is stale. %s", routingTable ); - - CompletableFuture resultFuture = new CompletableFuture<>(); - refreshRoutingTableFuture = resultFuture; - - rediscovery.lookupClusterComposition( routingTable, connectionPool ) - .whenComplete( ( composition, completionError ) -> - { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - clusterCompositionLookupFailed( error ); - } - else - { - freshClusterCompositionFetched( composition ); - } - } ); - - return resultFuture; - } - else - { - // existing routing table is fresh, use it - return completedFuture( routingTable ); - } - } - - private synchronized void freshClusterCompositionFetched( ClusterComposition composition ) - { - try - { - routingTable.update( composition ); - connectionPool.retainAll( routingTable.servers() ); - - log.info( "Updated routing table. %s", routingTable ); - - CompletableFuture routingTableFuture = refreshRoutingTableFuture; - refreshRoutingTableFuture = null; - routingTableFuture.complete( routingTable ); - } - catch ( Throwable error ) - { - clusterCompositionLookupFailed( error ); - } - } - - private synchronized void clusterCompositionLookupFailed( Throwable error ) - { - CompletableFuture routingTableFuture = refreshRoutingTableFuture; - refreshRoutingTableFuture = null; - routingTableFuture.completeExceptionally( error ); - } - - private CompletionStage acquire( AccessMode mode, RoutingTable routingTable ) - { - AddressSet addresses = addressSet( mode, routingTable ); - CompletableFuture result = new CompletableFuture<>(); - acquire( mode, addresses, result ); - return result; - } - - private void acquire( AccessMode mode, AddressSet addresses, CompletableFuture result ) - { - BoltServerAddress address = selectAddress( mode, addresses ); - - if ( address == null ) - { - result.completeExceptionally( new SessionExpiredException( - "Failed to obtain connection towards " + mode + " server. 
" + - "Known routing table is: " + routingTable ) ); - return; - } - - connectionPool.acquire( address ).whenComplete( ( connection, completionError ) -> - { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - if ( error instanceof ServiceUnavailableException ) - { - log.error( "Failed to obtain a connection towards address " + address, error ); - forget( address ); - eventExecutorGroup.next().execute( () -> acquire( mode, addresses, result ) ); - } - else - { - result.completeExceptionally( error ); - } - } - else - { - result.complete( connection ); - } - } ); - } - - private static AddressSet addressSet( AccessMode mode, RoutingTable routingTable ) - { - switch ( mode ) - { - case READ: - return routingTable.readers(); - case WRITE: - return routingTable.writers(); - default: - throw unknownMode( mode ); - } - } - - private BoltServerAddress selectAddress( AccessMode mode, AddressSet servers ) - { - BoltServerAddress[] addresses = servers.toArray(); - - switch ( mode ) - { - case READ: - return loadBalancingStrategy.selectReader( addresses ); - case WRITE: - return loadBalancingStrategy.selectWriter( addresses ); - default: - throw unknownMode( mode ); - } - } - - private static Rediscovery createRediscovery( BoltServerAddress initialRouter, RoutingSettings settings, - EventExecutorGroup eventExecutorGroup, ServerAddressResolver resolver, Clock clock, Logging logging ) - { - Logger log = loadBalancerLogger( logging ); - ClusterCompositionProvider clusterCompositionProvider = new RoutingProcedureClusterCompositionProvider( clock, settings ); - return new Rediscovery( initialRouter, settings, clusterCompositionProvider, eventExecutorGroup, resolver, log ); - } - - private static Logger loadBalancerLogger( Logging logging ) - { - return logging.getLog( LOAD_BALANCER_LOG_NAME ); - } - - private static RuntimeException unknownMode( AccessMode mode ) - { - return new IllegalArgumentException( "Mode '" + mode + "' is not supported" ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LoadBalancingStrategy.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LoadBalancingStrategy.java deleted file mode 100644 index ef094d91..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/LoadBalancingStrategy.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster.loadbalancing; - -import org.neo4j.driver.internal.BoltServerAddress; - -/** - * A facility to select most appropriate reader or writer among the given addresses for request processing. - */ -public interface LoadBalancingStrategy -{ - /** - * Select most appropriate read address from the given array of addresses. - * - * @param knownReaders array of all known readers. 
- * @return most appropriate reader or {@code null} if it can't be selected. - */ - BoltServerAddress selectReader( BoltServerAddress[] knownReaders ); - - /** - * Select most appropriate write address from the given array of addresses. - * - * @param knownWriters array of all known writers. - * @return most appropriate writer or {@code null} if it can't be selected. - */ - BoltServerAddress selectWriter( BoltServerAddress[] knownWriters ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/RoundRobinArrayIndex.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/RoundRobinArrayIndex.java deleted file mode 100644 index 2cfdc0a6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/RoundRobinArrayIndex.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cluster.loadbalancing; - -import java.util.concurrent.atomic.AtomicInteger; - -public class RoundRobinArrayIndex -{ - private final AtomicInteger offset; - - RoundRobinArrayIndex() - { - this( 0 ); - } - - // only for testing - RoundRobinArrayIndex( int initialOffset ) - { - this.offset = new AtomicInteger( initialOffset ); - } - - public int next( int arrayLength ) - { - if ( arrayLength == 0 ) - { - return -1; - } - - int nextOffset; - while ( (nextOffset = offset.getAndIncrement()) < 0 ) - { - // overflow, try resetting back to zero - offset.compareAndSet( nextOffset + 1, 0 ); - } - return nextOffset % arrayLength; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/RoundRobinLoadBalancingStrategy.java b/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/RoundRobinLoadBalancingStrategy.java deleted file mode 100644 index cbedc758..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cluster/loadbalancing/RoundRobinLoadBalancingStrategy.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.cluster.loadbalancing; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -/** - * Load balancing strategy that selects addresses in round-robin fashion. It maintains separate indices for readers and - * writers. - */ -public class RoundRobinLoadBalancingStrategy implements LoadBalancingStrategy -{ - private static final String LOGGER_NAME = RoundRobinLoadBalancingStrategy.class.getSimpleName(); - - private final RoundRobinArrayIndex readersIndex = new RoundRobinArrayIndex(); - private final RoundRobinArrayIndex writersIndex = new RoundRobinArrayIndex(); - - private final Logger log; - - public RoundRobinLoadBalancingStrategy( Logging logging ) - { - this.log = logging.getLog( LOGGER_NAME ); - } - - @Override - public BoltServerAddress selectReader( BoltServerAddress[] knownReaders ) - { - return select( knownReaders, readersIndex, "reader" ); - } - - @Override - public BoltServerAddress selectWriter( BoltServerAddress[] knownWriters ) - { - return select( knownWriters, writersIndex, "writer" ); - } - - private BoltServerAddress select( BoltServerAddress[] addresses, RoundRobinArrayIndex roundRobinIndex, - String addressType ) - { - int length = addresses.length; - if ( length == 0 ) - { - log.trace( "Unable to select %s, no known addresses given", addressType ); - return null; - } - - int index = roundRobinIndex.next( length ); - BoltServerAddress address = addresses[index]; - log.trace( "Selected %s with address: '%s'", addressType, address ); - return address; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/AsyncResultCursorOnlyFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/cursor/AsyncResultCursorOnlyFactory.java deleted file mode 100644 index 6d657afc..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/AsyncResultCursorOnlyFactory.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.cursor; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.async.AsyncStatementResultCursor; -import org.neo4j.driver.internal.handlers.PullAllResponseHandler; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.exceptions.ClientException; - -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.CompletableFuture.completedFuture; -import static org.neo4j.driver.internal.messaging.request.PullAllMessage.PULL_ALL; - -/** - * Used by Bolt V1, V2, V3 - */ -public class AsyncResultCursorOnlyFactory implements StatementResultCursorFactory -{ - protected final Connection connection; - protected final Message runMessage; - protected final RunResponseHandler runHandler; - protected final PullAllResponseHandler pullAllHandler; - private final boolean waitForRunResponse; - - public AsyncResultCursorOnlyFactory( Connection connection, Message runMessage, RunResponseHandler runHandler, - PullAllResponseHandler pullHandler, boolean waitForRunResponse ) - { - requireNonNull( connection ); - requireNonNull( runMessage ); - requireNonNull( runHandler ); - requireNonNull( pullHandler ); - - this.connection = connection; - this.runMessage = runMessage; - this.runHandler = runHandler; - - this.pullAllHandler = pullHandler; - this.waitForRunResponse = waitForRunResponse; - } - - public CompletionStage asyncResult() - { - // only write and flush messages when async result is wanted. - connection.writeAndFlush( runMessage, runHandler, PULL_ALL, pullAllHandler ); - - if ( waitForRunResponse ) - { - // wait for response of RUN before proceeding - return runHandler.runFuture().thenApply( ignore -> new AsyncStatementResultCursor( runHandler, pullAllHandler ) ); - } - else - { - return completedFuture( new AsyncStatementResultCursor( runHandler, pullAllHandler ) ); - } - } - - public CompletionStage rxResult() - { - return Futures.failedFuture( new ClientException( "Driver is connected to the database that does not support driver reactive API. " + - "In order to use the driver reactive API, please upgrade to neo4j 4.0.0 or later." ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/InternalStatementResultCursor.java b/src/graiph-driver/java/org/neo4j/driver/internal/cursor/InternalStatementResultCursor.java deleted file mode 100644 index 4560931d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/InternalStatementResultCursor.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.cursor; - -import org.neo4j.driver.internal.FailableCursor; -import org.neo4j.driver.async.StatementResultCursor; - -public interface InternalStatementResultCursor extends StatementResultCursor, FailableCursor -{ -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/InternalStatementResultCursorFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/cursor/InternalStatementResultCursorFactory.java deleted file mode 100644 index ab4fce64..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/InternalStatementResultCursorFactory.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cursor; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.async.AsyncStatementResultCursor; -import org.neo4j.driver.internal.handlers.PullAllResponseHandler; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.handlers.pulln.BasicPullResponseHandler; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.request.PullMessage; -import org.neo4j.driver.internal.spi.Connection; - -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.CompletableFuture.completedFuture; - -public class InternalStatementResultCursorFactory implements StatementResultCursorFactory -{ - private final RunResponseHandler runHandler; - private final Connection connection; - - private final BasicPullResponseHandler pullHandler; - private final PullAllResponseHandler pullAllHandler; - private final boolean waitForRunResponse; - private final Message runMessage; - - public InternalStatementResultCursorFactory( Connection connection, Message runMessage, RunResponseHandler runHandler, BasicPullResponseHandler pullHandler, - PullAllResponseHandler pullAllHandler, boolean waitForRunResponse ) - { - requireNonNull( connection ); - requireNonNull( runMessage ); - requireNonNull( runHandler ); - requireNonNull( pullHandler ); - requireNonNull( pullAllHandler ); - - this.connection = connection; - this.runMessage = runMessage; - this.runHandler = runHandler; - this.pullHandler = pullHandler; - this.pullAllHandler = pullAllHandler; - this.waitForRunResponse = waitForRunResponse; - } - - @Override - public CompletionStage asyncResult() - { - // only write and flush messages when async result is wanted. 
- connection.writeAndFlush( runMessage, runHandler, PullMessage.PULL_ALL, pullAllHandler ); - - if ( waitForRunResponse ) - { - // wait for response of RUN before proceeding - return runHandler.runFuture().thenApply( ignore -> new AsyncStatementResultCursor( runHandler, pullAllHandler ) ); - } - else - { - return completedFuture( new AsyncStatementResultCursor( runHandler, pullAllHandler ) ); - } - } - - @Override - public CompletionStage rxResult() - { - connection.writeAndFlush( runMessage, runHandler ); - // we always wait for run reply - return runHandler.runFuture().thenApply( this::composeRxCursor ); - } - - private RxStatementResultCursor composeRxCursor( Throwable runError ) - { - return new RxStatementResultCursor( runError, runHandler, pullHandler ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/RxStatementResultCursor.java b/src/graiph-driver/java/org/neo4j/driver/internal/cursor/RxStatementResultCursor.java deleted file mode 100644 index 788d419b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/RxStatementResultCursor.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.cursor; - -import org.reactivestreams.Subscription; - -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.BiConsumer; - -import org.neo4j.driver.internal.FailableCursor; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.handlers.pulln.BasicPullResponseHandler; -import org.neo4j.driver.Record; -import org.neo4j.driver.summary.ResultSummary; - -import static org.neo4j.driver.internal.handlers.pulln.AbstractBasicPullResponseHandler.DISCARD_RECORD_CONSUMER; - -public class RxStatementResultCursor implements Subscription, FailableCursor -{ - private final RunResponseHandler runHandler; - private final BasicPullResponseHandler pullHandler; - private final Throwable runResponseError; - private final CompletableFuture summaryFuture = new CompletableFuture<>(); - boolean isRecordHandlerInstalled = false; - - public RxStatementResultCursor( RunResponseHandler runHandler, BasicPullResponseHandler pullHandler ) - { - this( null, runHandler, pullHandler ); - } - - public RxStatementResultCursor( Throwable runError, RunResponseHandler runHandler, BasicPullResponseHandler pullHandler ) - { - Objects.requireNonNull( runHandler ); - Objects.requireNonNull( pullHandler ); - assertRunResponseArrived( runHandler ); - - this.runResponseError = runError; - this.runHandler = runHandler; - this.pullHandler = pullHandler; - installSummaryConsumer(); - } - - public List keys() - { - return runHandler.statementKeys(); - } - - public void installRecordConsumer( BiConsumer recordConsumer ) - { - if ( isRecordHandlerInstalled ) - { - return; - } - isRecordHandlerInstalled = true; - pullHandler.installRecordConsumer( recordConsumer ); - assertRunCompletedSuccessfully(); - } - - public void request( long n ) - { - pullHandler.request( n ); - } - - @Override - public void cancel() - { - pullHandler.cancel(); - } - - @Override - public CompletionStage failureAsync() - { - // calling this method will enforce discarding record stream and finish running cypher query - return summaryAsync().thenApply( summary -> (Throwable) null ).exceptionally( error -> error ); - } - - public CompletionStage summaryAsync() - { - if ( !isDone() ) // the summary is called before record streaming - { - installRecordConsumer( DISCARD_RECORD_CONSUMER ); - cancel(); - } - - return this.summaryFuture; - } - - public boolean isDone() - { - return summaryFuture.isDone(); - } - - private void assertRunCompletedSuccessfully() - { - if ( runResponseError != null ) - { - pullHandler.onFailure( runResponseError ); - } - } - - private void installSummaryConsumer() - { - pullHandler.installSummaryConsumer( ( summary, error ) -> { - if ( error != null ) - { - summaryFuture.completeExceptionally( error ); - } - else if ( summary != null ) - { - summaryFuture.complete( summary ); - } - //else (null, null) to indicate a has_more success - } ); - } - - private void assertRunResponseArrived( RunResponseHandler runHandler ) - { - if ( !runHandler.runFuture().isDone() ) - { - throw new IllegalStateException( "Should wait for response of RUN before allowing PULL." 
); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/StatementResultCursorFactory.java b/src/graiph-driver/java/org/neo4j/driver/internal/cursor/StatementResultCursorFactory.java deleted file mode 100644 index 200d3629..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/cursor/StatementResultCursorFactory.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.cursor; - -import java.util.concurrent.CompletionStage; - -public interface StatementResultCursorFactory -{ - CompletionStage asyncResult(); - - CompletionStage rxResult(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/AbstractPullAllResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/AbstractPullAllResponseHandler.java deleted file mode 100644 index 7ec9c434..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/AbstractPullAllResponseHandler.java +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; - -import org.neo4j.driver.internal.InternalRecord; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.internal.util.Iterables; -import org.neo4j.driver.internal.util.MetadataExtractor; -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.summary.ResultSummary; - -import static java.util.Collections.emptyMap; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.CompletableFuture.completedFuture; -import static org.neo4j.driver.internal.util.Futures.completedWithNull; -import static org.neo4j.driver.internal.util.Futures.failedFuture; - -public abstract class AbstractPullAllResponseHandler implements PullAllResponseHandler -{ - private static final Queue UNINITIALIZED_RECORDS = Iterables.emptyQueue(); - - static final int RECORD_BUFFER_LOW_WATERMARK = Integer.getInteger( "recordBufferLowWatermark", 300 ); - static final int RECORD_BUFFER_HIGH_WATERMARK = Integer.getInteger( "recordBufferHighWatermark", 1000 ); - - private final Statement statement; - private final RunResponseHandler runResponseHandler; - protected final MetadataExtractor metadataExtractor; - protected final Connection connection; - - // initialized lazily when first record arrives - private Queue records = UNINITIALIZED_RECORDS; - - private boolean autoReadManagementEnabled = true; - private boolean finished; - private Throwable failure; - private ResultSummary summary; - - private boolean ignoreRecords; - private CompletableFuture recordFuture; - private CompletableFuture failureFuture; - - public AbstractPullAllResponseHandler( Statement statement, RunResponseHandler runResponseHandler, Connection connection, MetadataExtractor metadataExtractor ) - { - this.statement = requireNonNull( statement ); - this.runResponseHandler = requireNonNull( runResponseHandler ); - this.metadataExtractor = requireNonNull( metadataExtractor ); - this.connection = requireNonNull( connection ); - } - - @Override - public boolean canManageAutoRead() - { - return true; - } - - @Override - public synchronized void onSuccess( Map metadata ) - { - finished = true; - summary = extractResultSummary( metadata ); - - afterSuccess( metadata ); - - completeRecordFuture( null ); - completeFailureFuture( null ); - } - - protected abstract void afterSuccess( Map metadata ); - - @Override - public synchronized void onFailure( Throwable error ) - { - finished = true; - summary = extractResultSummary( emptyMap() ); - - afterFailure( error ); - - boolean failedRecordFuture = failRecordFuture( error ); - if ( failedRecordFuture ) - { - // error propagated through the record future - completeFailureFuture( null ); - } - else - { - boolean completedFailureFuture = completeFailureFuture( error ); - if ( !completedFailureFuture ) - { - // error has not been propagated to the user, remember it - failure = error; - } - } - } - - protected abstract void afterFailure( Throwable error ); - - @Override - public synchronized void onRecord( Value[] fields ) - { - if ( ignoreRecords ) - { - completeRecordFuture( null ); - } - else - { - Record record = new InternalRecord( 
runResponseHandler.statementKeys(), fields ); - enqueueRecord( record ); - completeRecordFuture( record ); - } - } - - @Override - public synchronized void disableAutoReadManagement() - { - autoReadManagementEnabled = false; - } - - public synchronized CompletionStage peekAsync() - { - Record record = records.peek(); - if ( record == null ) - { - if ( failure != null ) - { - return failedFuture( extractFailure() ); - } - - if ( ignoreRecords || finished ) - { - return completedWithNull(); - } - - if ( recordFuture == null ) - { - recordFuture = new CompletableFuture<>(); - } - return recordFuture; - } - else - { - return completedFuture( record ); - } - } - - public synchronized CompletionStage nextAsync() - { - return peekAsync().thenApply( ignore -> dequeueRecord() ); - } - - public synchronized CompletionStage summaryAsync() - { - return failureAsync().thenApply( error -> - { - if ( error != null ) - { - throw Futures.asCompletionException( error ); - } - return summary; - } ); - } - - public synchronized CompletionStage consumeAsync() - { - ignoreRecords = true; - records.clear(); - return summaryAsync(); - } - - public synchronized CompletionStage> listAsync( Function mapFunction ) - { - return failureAsync().thenApply( error -> - { - if ( error != null ) - { - throw Futures.asCompletionException( error ); - } - return recordsAsList( mapFunction ); - } ); - } - - public synchronized CompletionStage failureAsync() - { - if ( failure != null ) - { - return completedFuture( extractFailure() ); - } - else if ( finished ) - { - return completedWithNull(); - } - else - { - if ( failureFuture == null ) - { - // neither SUCCESS nor FAILURE message has arrived, register future to be notified when it arrives - // future will be completed with null on SUCCESS and completed with Throwable on FAILURE - // enable auto-read, otherwise we might not read SUCCESS/FAILURE if records are not consumed - enableAutoRead(); - failureFuture = new CompletableFuture<>(); - } - return failureFuture; - } - } - - private void enqueueRecord( Record record ) - { - if ( records == UNINITIALIZED_RECORDS ) - { - records = new ArrayDeque<>(); - } - - records.add( record ); - - boolean shouldBufferAllRecords = failureFuture != null; - // when failure is requested we have to buffer all remaining records and then return the error - // do not disable auto-read in this case, otherwise records will not be consumed and trailing - // SUCCESS or FAILURE message will not arrive as well, so callers will get stuck waiting for the error - if ( !shouldBufferAllRecords && records.size() > RECORD_BUFFER_HIGH_WATERMARK ) - { - // more than high watermark records are already queued, tell connection to stop auto-reading from network - // this is needed to deal with slow consumers, we do not want to buffer all records in memory if they are - // fetched from network faster than consumed - disableAutoRead(); - } - } - - private Record dequeueRecord() - { - Record record = records.poll(); - - if ( records.size() < RECORD_BUFFER_LOW_WATERMARK ) - { - // less than low watermark records are now available in the buffer, tell connection to pre-fetch more - // and populate queue with new records from network - enableAutoRead(); - } - - return record; - } - - private List recordsAsList( Function mapFunction ) - { - if ( !finished ) - { - throw new IllegalStateException( "Can't get records as list because SUCCESS or FAILURE did not arrive" ); - } - - List result = new ArrayList<>( records.size() ); - while ( !records.isEmpty() ) - { - Record record = 
records.poll(); - result.add( mapFunction.apply( record ) ); - } - return result; - } - - private Throwable extractFailure() - { - if ( failure == null ) - { - throw new IllegalStateException( "Can't extract failure because it does not exist" ); - } - - Throwable error = failure; - failure = null; // propagate failure only once - return error; - } - - private void completeRecordFuture( Record record ) - { - if ( recordFuture != null ) - { - CompletableFuture future = recordFuture; - recordFuture = null; - future.complete( record ); - } - } - - private boolean failRecordFuture( Throwable error ) - { - if ( recordFuture != null ) - { - CompletableFuture future = recordFuture; - recordFuture = null; - future.completeExceptionally( error ); - return true; - } - return false; - } - - private boolean completeFailureFuture( Throwable error ) - { - if ( failureFuture != null ) - { - CompletableFuture future = failureFuture; - failureFuture = null; - future.complete( error ); - return true; - } - return false; - } - - private ResultSummary extractResultSummary( Map metadata ) - { - long resultAvailableAfter = runResponseHandler.resultAvailableAfter(); - return metadataExtractor.extractSummary( statement, connection, resultAvailableAfter, metadata ); - } - - private void enableAutoRead() - { - if ( autoReadManagementEnabled ) - { - connection.enableAutoRead(); - } - } - - private void disableAutoRead() - { - if ( autoReadManagementEnabled ) - { - connection.disableAutoRead(); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/BeginTxResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/BeginTxResponseHandler.java deleted file mode 100644 index 7d2c77d2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/BeginTxResponseHandler.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Value; - -import static java.util.Objects.requireNonNull; - -public class BeginTxResponseHandler implements ResponseHandler -{ - private final CompletableFuture beginTxFuture; - - public BeginTxResponseHandler( CompletableFuture beginTxFuture ) - { - this.beginTxFuture = requireNonNull( beginTxFuture ); - } - - @Override - public void onSuccess( Map metadata ) - { - beginTxFuture.complete( null ); - } - - @Override - public void onFailure( Throwable error ) - { - beginTxFuture.completeExceptionally( error ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException( - "Transaction begin is not expected to receive records: " + Arrays.toString( fields ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/ChannelReleasingResetResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/ChannelReleasingResetResponseHandler.java deleted file mode 100644 index d96dfc4b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/ChannelReleasingResetResponseHandler.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import io.netty.channel.Channel; -import io.netty.channel.pool.ChannelPool; -import io.netty.util.concurrent.Future; - -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.internal.async.inbound.InboundMessageDispatcher; -import org.neo4j.driver.internal.util.Clock; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setLastUsedTimestamp; - -public class ChannelReleasingResetResponseHandler extends ResetResponseHandler -{ - private final Channel channel; - private final ChannelPool pool; - private final Clock clock; - - public ChannelReleasingResetResponseHandler( Channel channel, ChannelPool pool, - InboundMessageDispatcher messageDispatcher, Clock clock, CompletableFuture releaseFuture ) - { - super( messageDispatcher, releaseFuture ); - this.channel = channel; - this.pool = pool; - this.clock = clock; - } - - @Override - protected void resetCompleted( CompletableFuture completionFuture, boolean success ) - { - if ( success ) - { - // update the last-used timestamp before returning the channel back to the pool - setLastUsedTimestamp( channel, clock.millis() ); - } - else - { - // close the channel before returning it back to the pool if RESET failed - channel.close(); - } - - Future released = pool.release( channel ); - released.addListener( ignore -> completionFuture.complete( null ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/CommitTxResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/CommitTxResponseHandler.java deleted file mode 100644 index d2602f9f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/CommitTxResponseHandler.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Value; - -import static java.util.Objects.requireNonNull; - -public class CommitTxResponseHandler implements ResponseHandler -{ - private final CompletableFuture commitFuture; - - public CommitTxResponseHandler( CompletableFuture commitFuture ) - { - this.commitFuture = requireNonNull( commitFuture ); - } - - @Override - public void onSuccess( Map metadata ) - { - Value bookmarkValue = metadata.get( "bookmark" ); - if ( bookmarkValue == null ) - { - commitFuture.complete( null ); - } - else - { - commitFuture.complete( Bookmarks.from( bookmarkValue.asString() ) ); - } - } - - @Override - public void onFailure( Throwable error ) - { - commitFuture.completeExceptionally( error ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException( - "Transaction commit is not expected to receive records: " + Arrays.toString( fields ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/HelloResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/HelloResponseHandler.java deleted file mode 100644 index ec088f93..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/HelloResponseHandler.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; - -import java.util.Map; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.ServerVersion; -import org.neo4j.driver.Value; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setConnectionId; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setServerVersion; -import static org.neo4j.driver.internal.util.MetadataExtractor.extractNeo4jServerVersion; - -public class HelloResponseHandler implements ResponseHandler -{ - private static final String CONNECTION_ID_METADATA_KEY = "connection_id"; - - private final ChannelPromise connectionInitializedPromise; - private final Channel channel; - - public HelloResponseHandler( ChannelPromise connectionInitializedPromise ) - { - this.connectionInitializedPromise = connectionInitializedPromise; - this.channel = connectionInitializedPromise.channel(); - } - - @Override - public void onSuccess( Map metadata ) - { - try - { - ServerVersion serverVersion = extractNeo4jServerVersion( metadata ); - setServerVersion( channel, serverVersion ); - - String connectionId = extractConnectionId( metadata ); - setConnectionId( channel, connectionId ); - - connectionInitializedPromise.setSuccess(); - } - catch ( Throwable error ) - { - onFailure( error ); - throw error; - } - } - - @Override - public void onFailure( Throwable error ) - { - channel.close().addListener( future -> connectionInitializedPromise.setFailure( error ) ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException(); - } - - private static String extractConnectionId( Map metadata ) - { - Value value = metadata.get( CONNECTION_ID_METADATA_KEY ); - if ( value == null || value.isNull() ) - { - throw new IllegalStateException( "Unable to extract " + CONNECTION_ID_METADATA_KEY + " from a response to HELLO message. " + - "Received metadata: " + metadata ); - } - return value.asString(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/InitResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/InitResponseHandler.java deleted file mode 100644 index f03414bf..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/InitResponseHandler.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelPromise; - -import java.util.Map; - -import org.neo4j.driver.internal.async.outbound.OutboundMessageHandler; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.ServerVersion; -import org.neo4j.driver.Value; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.setServerVersion; -import static org.neo4j.driver.internal.util.MetadataExtractor.extractNeo4jServerVersion; - -public class InitResponseHandler implements ResponseHandler -{ - private final ChannelPromise connectionInitializedPromise; - private final Channel channel; - - public InitResponseHandler( ChannelPromise connectionInitializedPromise ) - { - this.connectionInitializedPromise = connectionInitializedPromise; - this.channel = connectionInitializedPromise.channel(); - } - - @Override - public void onSuccess( Map metadata ) - { - try - { - ServerVersion serverVersion = extractNeo4jServerVersion( metadata ); - setServerVersion( channel, serverVersion ); - updatePipelineIfNeeded( serverVersion, channel.pipeline() ); - connectionInitializedPromise.setSuccess(); - } - catch ( Throwable error ) - { - connectionInitializedPromise.setFailure( error ); - throw error; - } - } - - @Override - public void onFailure( Throwable error ) - { - channel.close().addListener( future -> connectionInitializedPromise.setFailure( error ) ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException(); - } - - private static void updatePipelineIfNeeded( ServerVersion serverVersion, ChannelPipeline pipeline ) - { - if ( serverVersion.lessThan( ServerVersion.v3_2_0 ) ) - { - OutboundMessageHandler outboundHandler = pipeline.get( OutboundMessageHandler.class ); - if ( outboundHandler != null ) - { - pipeline.replace( outboundHandler, OutboundMessageHandler.NAME, outboundHandler.withoutByteArraySupport() ); - } - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/NoOpResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/NoOpResponseHandler.java deleted file mode 100644 index 62a5e078..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/NoOpResponseHandler.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers; - -import java.util.Map; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Value; - -public class NoOpResponseHandler implements ResponseHandler -{ - public static final NoOpResponseHandler INSTANCE = new NoOpResponseHandler(); - - @Override - public void onSuccess( Map metadata ) - { - } - - @Override - public void onFailure( Throwable error ) - { - } - - @Override - public void onRecord( Value[] fields ) - { - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PingResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PingResponseHandler.java deleted file mode 100644 index 582bd1b0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PingResponseHandler.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import io.netty.channel.Channel; -import io.netty.util.concurrent.Promise; - -import java.util.Map; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Value; - -public class PingResponseHandler implements ResponseHandler -{ - private final Promise result; - private final Channel channel; - private final Logger log; - - public PingResponseHandler( Promise result, Channel channel, Logger log ) - { - this.result = result; - this.channel = channel; - this.log = log; - } - - @Override - public void onSuccess( Map metadata ) - { - log.trace( "Channel %s pinged successfully", channel ); - result.setSuccess( true ); - } - - @Override - public void onFailure( Throwable error ) - { - log.trace( "Channel %s failed ping %s", channel, error ); - result.setSuccess( false ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PullAllResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PullAllResponseHandler.java deleted file mode 100644 index 0dcbd3b3..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PullAllResponseHandler.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import java.util.List; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Record; -import org.neo4j.driver.summary.ResultSummary; -import java.util.function.Function; - -public interface PullAllResponseHandler extends ResponseHandler -{ - CompletionStage summaryAsync(); - - CompletionStage nextAsync(); - - CompletionStage peekAsync(); - - CompletionStage consumeAsync(); - - CompletionStage> listAsync( Function mapFunction ); - - CompletionStage failureAsync(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PullHandlers.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PullHandlers.java deleted file mode 100644 index fef1eac3..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/PullHandlers.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.handlers.pulln.BasicPullResponseHandler; -import org.neo4j.driver.internal.handlers.pulln.SessionPullResponseHandler; -import org.neo4j.driver.internal.handlers.pulln.TransactionPullResponseHandler; -import org.neo4j.driver.internal.messaging.v1.BoltProtocolV1; -import org.neo4j.driver.internal.messaging.v3.BoltProtocolV3; -import org.neo4j.driver.internal.spi.Connection; - -public class PullHandlers -{ - public static AbstractPullAllResponseHandler newBoltV1PullAllHandler( Statement statement, RunResponseHandler runHandler, - Connection connection, ExplicitTransaction tx ) - { - if ( tx != null ) - { - return new TransactionPullAllResponseHandler( statement, runHandler, connection, tx, BoltProtocolV1.METADATA_EXTRACTOR ); - } - return new SessionPullAllResponseHandler( statement, runHandler, connection, BookmarksHolder.NO_OP, BoltProtocolV1.METADATA_EXTRACTOR ); - } - - public static AbstractPullAllResponseHandler newBoltV3PullAllHandler( Statement statement, RunResponseHandler runHandler, Connection connection, - BookmarksHolder bookmarksHolder, ExplicitTransaction tx ) - { - if ( tx != null ) - { - return new TransactionPullAllResponseHandler( statement, runHandler, connection, tx, BoltProtocolV3.METADATA_EXTRACTOR ); - } - return new SessionPullAllResponseHandler( statement, runHandler, connection, bookmarksHolder, BoltProtocolV3.METADATA_EXTRACTOR ); - } - - public static BasicPullResponseHandler newBoltV4PullHandler( Statement statement, RunResponseHandler runHandler, Connection connection, - BookmarksHolder bookmarksHolder, ExplicitTransaction tx ) - { - if ( tx != null 
) - { - return new TransactionPullResponseHandler( statement, runHandler, connection, tx, BoltProtocolV3.METADATA_EXTRACTOR ); - } - return new SessionPullResponseHandler( statement, runHandler, connection, bookmarksHolder, BoltProtocolV3.METADATA_EXTRACTOR ); - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/ResetResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/ResetResponseHandler.java deleted file mode 100644 index bff578ad..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/ResetResponseHandler.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.internal.async.inbound.InboundMessageDispatcher; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Value; - -public class ResetResponseHandler implements ResponseHandler -{ - private final InboundMessageDispatcher messageDispatcher; - private final CompletableFuture completionFuture; - - public ResetResponseHandler( InboundMessageDispatcher messageDispatcher ) - { - this( messageDispatcher, null ); - } - - public ResetResponseHandler( InboundMessageDispatcher messageDispatcher, CompletableFuture completionFuture ) - { - this.messageDispatcher = messageDispatcher; - this.completionFuture = completionFuture; - } - - @Override - public final void onSuccess( Map metadata ) - { - resetCompleted( true ); - } - - @Override - public final void onFailure( Throwable error ) - { - resetCompleted( false ); - } - - @Override - public final void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException(); - } - - private void resetCompleted( boolean success ) - { - messageDispatcher.clearCurrentError(); - if ( completionFuture != null ) - { - resetCompleted( completionFuture, success ); - } - } - - protected void resetCompleted( CompletableFuture completionFuture, boolean success ) - { - completionFuture.complete( null ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RollbackTxResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RollbackTxResponseHandler.java deleted file mode 100644 index 2815d343..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RollbackTxResponseHandler.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Value; - -import static java.util.Objects.requireNonNull; - -public class RollbackTxResponseHandler implements ResponseHandler -{ - private final CompletableFuture rollbackFuture; - - public RollbackTxResponseHandler( CompletableFuture rollbackFuture ) - { - this.rollbackFuture = requireNonNull( rollbackFuture ); - } - - @Override - public void onSuccess( Map metadata ) - { - rollbackFuture.complete( null ); - } - - @Override - public void onFailure( Throwable error ) - { - rollbackFuture.completeExceptionally( error ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException( - "Transaction rollback is not expected to receive records: " + Arrays.toString( fields ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RoutingResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RoutingResponseHandler.java deleted file mode 100644 index f326e19a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RoutingResponseHandler.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
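Both ResetResponseHandler and RollbackTxResponseHandler do little more than translate a single server response into the completion of a CompletableFuture. A sketch of the rollback case; the Void type parameter is an assumption inferred from rollbackFuture.complete(null), since the declaration above does not show generics.

CompletableFuture<Void> rollbackFuture = new CompletableFuture<>();   // assumed Void-typed
ResponseHandler rollbackHandler = new RollbackTxResponseHandler( rollbackFuture );

rollbackHandler.onSuccess( Collections.emptyMap() );   // SUCCESS -> rollbackFuture completes with null
// rollbackHandler.onFailure( cause );                 // FAILURE -> completes exceptionally
// rollbackHandler.onRecord( fields );                 // never expected; throws UnsupportedOperationException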
- */ -package org.neo4j.driver.internal.handlers; - -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.RoutingErrorHandler; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Value; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.exceptions.SessionExpiredException; -import org.neo4j.driver.exceptions.TransientException; - -import static java.lang.String.format; - -public class RoutingResponseHandler implements ResponseHandler -{ - private final ResponseHandler delegate; - private final BoltServerAddress address; - private final AccessMode accessMode; - private final RoutingErrorHandler errorHandler; - - public RoutingResponseHandler( ResponseHandler delegate, BoltServerAddress address, AccessMode accessMode, - RoutingErrorHandler errorHandler ) - { - this.delegate = delegate; - this.address = address; - this.accessMode = accessMode; - this.errorHandler = errorHandler; - } - - @Override - public void onSuccess( Map metadata ) - { - delegate.onSuccess( metadata ); - } - - @Override - public void onFailure( Throwable error ) - { - Throwable newError = handledError( error ); - delegate.onFailure( newError ); - } - - @Override - public void onRecord( Value[] fields ) - { - delegate.onRecord( fields ); - } - - @Override - public boolean canManageAutoRead() - { - return delegate.canManageAutoRead(); - } - - @Override - public void disableAutoReadManagement() - { - delegate.disableAutoReadManagement(); - } - - private Throwable handledError( Throwable receivedError ) - { - Throwable error = Futures.completionExceptionCause( receivedError ); - - if ( error instanceof ServiceUnavailableException ) - { - return handledServiceUnavailableException( ((ServiceUnavailableException) error) ); - } - else if ( error instanceof ClientException ) - { - return handledClientException( ((ClientException) error) ); - } - else if ( error instanceof TransientException ) - { - return handledTransientException( ((TransientException) error) ); - } - else - { - return error; - } - } - - private Throwable handledServiceUnavailableException( ServiceUnavailableException e ) - { - errorHandler.onConnectionFailure( address ); - return new SessionExpiredException( format( "Server at %s is no longer available", address ), e ); - } - - private Throwable handledTransientException( TransientException e ) - { - String errorCode = e.code(); - if ( Objects.equals( errorCode, "Neo.TransientError.General.DatabaseUnavailable" ) ) - { - errorHandler.onConnectionFailure( address ); - } - return e; - } - - private Throwable handledClientException( ClientException e ) - { - if ( isFailureToWrite( e ) ) - { - // The server is unaware of the session mode, so we have to implement this logic in the driver. - // In the future, we might be able to move this logic to the server. - switch ( accessMode ) - { - case READ: - return new ClientException( "Write queries cannot be performed in READ access mode." ); - case WRITE: - errorHandler.onWriteFailure( address ); - return new SessionExpiredException( format( "Server at %s no longer accepts writes", address ) ); - default: - throw new IllegalArgumentException( accessMode + " not supported." 
); - } - } - return e; - } - - private static boolean isFailureToWrite( ClientException e ) - { - String errorCode = e.code(); - return Objects.equals( errorCode, "Neo.ClientError.Cluster.NotALeader" ) || - Objects.equals( errorCode, "Neo.ClientError.General.ForbiddenOnReadOnlyDatabase" ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RunResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RunResponseHandler.java deleted file mode 100644 index d9eb994e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/RunResponseHandler.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.MetadataExtractor; -import org.neo4j.driver.Value; - -import static java.util.Collections.emptyList; - -public class RunResponseHandler implements ResponseHandler -{ - private final CompletableFuture runCompletedFuture; - private final MetadataExtractor metadataExtractor; - private long statementId = MetadataExtractor.ABSENT_QUERY_ID; - - private List statementKeys = emptyList(); - private long resultAvailableAfter = -1; - - public RunResponseHandler( MetadataExtractor metadataExtractor ) - { - this( new CompletableFuture<>(), metadataExtractor ); - } - - public RunResponseHandler( CompletableFuture runCompletedFuture, MetadataExtractor metadataExtractor ) - { - this.runCompletedFuture = runCompletedFuture; - this.metadataExtractor = metadataExtractor; - } - - @Override - public void onSuccess( Map metadata ) - { - statementKeys = metadataExtractor.extractStatementKeys( metadata ); - resultAvailableAfter = metadataExtractor.extractResultAvailableAfter( metadata ); - statementId = metadataExtractor.extractQueryId( metadata ); - - completeRunFuture( null ); - } - - @Override - public void onFailure( Throwable error ) - { - completeRunFuture( error ); - } - - @Override - public void onRecord( Value[] fields ) - { - throw new UnsupportedOperationException(); - } - - public List statementKeys() - { - return statementKeys; - } - - public long resultAvailableAfter() - { - return resultAvailableAfter; - } - - public long statementId() - { - return statementId; - } - - /** - * Complete the given future with error if the future was failed. - * Future is never completed exceptionally. - * Async API needs to wait for RUN because it needs to access statement keys. - * Reactive API needs to know if RUN failed by checking the error. 
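RoutingResponseHandler, shown above, is a pure decorator: every callback is forwarded to a delegate handler, and only failures that affect the routing table are rewritten on the way through. A hedged sketch of wrapping a handler for a routed connection; the delegate and errorHandler parameters are hypothetical placeholders, and the cluster address is made up for illustration.

static ResponseHandler wrapForRouting( ResponseHandler delegate, RoutingErrorHandler errorHandler )
{
    // hypothetical cluster member address, for illustration only
    BoltServerAddress address = new BoltServerAddress( "core-1.example.com", 7687 );
    return new RoutingResponseHandler( delegate, address, AccessMode.WRITE, errorHandler );

    // onFailure( ServiceUnavailableException )        -> errorHandler.onConnectionFailure(address),
    //                                                    delegate sees a SessionExpiredException
    // onFailure( "Neo.ClientError.Cluster.NotALeader" ) -> errorHandler.onWriteFailure(address),
    //                                                    delegate sees a SessionExpiredException
}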
- */ - private void completeRunFuture( Throwable error ) - { - runCompletedFuture.complete( error ); - } - - public CompletableFuture runFuture() - { - return runCompletedFuture; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/SessionPullAllResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/SessionPullAllResponseHandler.java deleted file mode 100644 index 57de23da..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/SessionPullAllResponseHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers; - -import java.util.Map; - -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.MetadataExtractor; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; - -import static java.util.Objects.requireNonNull; - -public class SessionPullAllResponseHandler extends AbstractPullAllResponseHandler -{ - private final BookmarksHolder bookmarksHolder; - - public SessionPullAllResponseHandler( Statement statement, RunResponseHandler runResponseHandler, - Connection connection, BookmarksHolder bookmarksHolder, MetadataExtractor metadataExtractor ) - { - super( statement, runResponseHandler, connection, metadataExtractor ); - this.bookmarksHolder = requireNonNull( bookmarksHolder ); - } - - @Override - protected void afterSuccess( Map metadata ) - { - releaseConnection(); - bookmarksHolder.setBookmarks( metadataExtractor.extractBookmarks( metadata ) ); - } - - @Override - protected void afterFailure( Throwable error ) - { - releaseConnection(); - } - - private void releaseConnection() - { - connection.release(); // release in background - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/TransactionPullAllResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/TransactionPullAllResponseHandler.java deleted file mode 100644 index c9ee2244..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/TransactionPullAllResponseHandler.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
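The comment on completeRunFuture above is the key contract: the RUN future never completes exceptionally; a failure is delivered as the value of the future. A sketch of how a caller observes that contract; the Throwable-typed future and the METADATA_EXTRACTOR constant used here follow from the surrounding code, but the wiring is illustrative only.

RunResponseHandler runHandler = new RunResponseHandler( BoltProtocolV3.METADATA_EXTRACTOR );

runHandler.runFuture().thenAccept( error -> {
    if ( error != null )
    {
        // RUN failed: a reactive cursor reports this error to its subscriber
    }
    else
    {
        // RUN succeeded: statementKeys(), statementId() and resultAvailableAfter() are populated
    }
} );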
- */ -package org.neo4j.driver.internal.handlers; - -import java.util.Map; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.MetadataExtractor; - -import static java.util.Objects.requireNonNull; - -public class TransactionPullAllResponseHandler extends AbstractPullAllResponseHandler -{ - private final ExplicitTransaction tx; - - public TransactionPullAllResponseHandler( Statement statement, RunResponseHandler runResponseHandler, - Connection connection, ExplicitTransaction tx, MetadataExtractor metadataExtractor ) - { - super( statement, runResponseHandler, connection, metadataExtractor ); - this.tx = requireNonNull( tx ); - } - - @Override - protected void afterSuccess( Map metadata ) - { - } - - @Override - protected void afterFailure( Throwable error ) - { - // always mark transaction as terminated because every error is "acknowledged" with a RESET message - // so database forgets about the transaction after the first error - // such transaction should not attempt to commit and can be considered as rolled back - tx.markTerminated(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/AbstractBasicPullResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/AbstractBasicPullResponseHandler.java deleted file mode 100644 index 7ee433d2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/AbstractBasicPullResponseHandler.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers.pulln; - -import java.util.Map; -import java.util.function.BiConsumer; - -import org.neo4j.driver.internal.InternalRecord; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.messaging.request.PullMessage; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.MetadataExtractor; -import org.neo4j.driver.internal.value.BooleanValue; -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.summary.ResultSummary; - -import static java.lang.String.format; -import static java.util.Collections.emptyMap; -import static java.util.Objects.requireNonNull; -import static org.neo4j.driver.internal.messaging.request.DiscardMessage.newDiscardAllMessage; - -/** - * In this class we have a hidden state machine. 
- * Here is how it looks like: - * | | DONE | FAILED | STREAMING | READY | CANCELED | - * |--------------------|------|--------|--------------------------------|--------------------|----------------| - * | request | X | X | toRequest++ ->STREAMING | PULL ->STREAMING | X | - * | cancel | X | X | ->CANCELED | DISCARD ->CANCELED | ->CANCELED | - * | onSuccess has_more | X | X | ->READY request if toRequest>0 | X | ->READY cancel | - * | onSuccess | X | X | summary ->DONE | X | summary ->DONE | - * | onRecord | X | X | yield record ->STREAMING | X | ->CANCELED | - * | onFailure | X | X | ->FAILED | X | ->FAILED | - * - * Currently the error state (marked with X on the table above) might not be enforced. - */ -public abstract class AbstractBasicPullResponseHandler implements BasicPullResponseHandler -{ - public static final BiConsumer DISCARD_RECORD_CONSUMER = ( record, throwable ) -> {/*do nothing*/}; - - private final Statement statement; - protected final RunResponseHandler runResponseHandler; - protected final MetadataExtractor metadataExtractor; - protected final Connection connection; - - private Status status = Status.READY; - private long toRequest; - private BiConsumer recordConsumer = null; - private BiConsumer summaryConsumer = null; - - protected abstract void afterSuccess( Map metadata ); - - protected abstract void afterFailure( Throwable error ); - - public AbstractBasicPullResponseHandler( Statement statement, RunResponseHandler runResponseHandler, Connection connection, MetadataExtractor metadataExtractor ) - { - this.statement = requireNonNull( statement ); - this.runResponseHandler = requireNonNull( runResponseHandler ); - this.metadataExtractor = requireNonNull( metadataExtractor ); - this.connection = requireNonNull( connection ); - } - - @Override - public synchronized void onSuccess( Map metadata ) - { - assertRecordAndSummaryConsumerInstalled(); - if ( metadata.getOrDefault( "has_more", BooleanValue.FALSE ).asBoolean() ) - { - handleSuccessWithHasMore(); - } - else - { - handleSuccessWithSummary( metadata ); - } - } - - @Override - public synchronized void onFailure( Throwable error ) - { - assertRecordAndSummaryConsumerInstalled(); - status = Status.FAILED; - afterFailure( error ); - - complete( extractResultSummary( emptyMap() ), error ); - } - - @Override - public synchronized void onRecord( Value[] fields ) - { - assertRecordAndSummaryConsumerInstalled(); - if ( isStreaming() ) - { - Record record = new InternalRecord( runResponseHandler.statementKeys(), fields ); - recordConsumer.accept( record, null ); - } - } - - @Override - public synchronized void request( long size ) - { - assertRecordAndSummaryConsumerInstalled(); - if ( isStreamingPaused() ) - { - connection.writeAndFlush( new PullMessage( size, runResponseHandler.statementId() ), this ); - status = Status.STREAMING; - } - else if ( isStreaming() ) - { - addToRequest( size ); - } - } - - @Override - public synchronized void cancel() - { - assertRecordAndSummaryConsumerInstalled(); - if ( isStreamingPaused() ) - { - // Reactive API does not provide a way to discard N. Only discard all. 
- connection.writeAndFlush( newDiscardAllMessage( runResponseHandler.statementId() ), this ); - status = Status.CANCELED; - } - else if ( isStreaming() ) - { - status = Status.CANCELED; - } - // no need to change status if it is already done - } - - @Override - public synchronized void installSummaryConsumer( BiConsumer summaryConsumer ) - { - if( this.summaryConsumer != null ) - { - throw new IllegalStateException( "Summary consumer already installed." ); - } - this.summaryConsumer = summaryConsumer; - } - - @Override - public synchronized void installRecordConsumer( BiConsumer recordConsumer ) - { - if( this.recordConsumer != null ) - { - throw new IllegalStateException( "Record consumer already installed." ); - } - this.recordConsumer = recordConsumer; - } - - private boolean isStreaming() - { - return status == Status.STREAMING; - } - - private boolean isStreamingPaused() - { - return status == Status.READY; - } - - private boolean isFinished() - { - return status == Status.DONE || status == Status.FAILED; - } - - private void handleSuccessWithSummary( Map metadata ) - { - status = Status.DONE; - afterSuccess( metadata ); - ResultSummary summary = extractResultSummary( metadata ); - - complete( summary, null ); - } - - private void handleSuccessWithHasMore() - { - if ( this.status == Status.CANCELED ) - { - this.status = Status.READY; // cancel request accepted. - cancel(); - } - else if ( this.status == Status.STREAMING ) - { - this.status = Status.READY; - if ( toRequest > 0 ) - { - request( toRequest ); - toRequest = 0; - } - // summary consumer use (null, null) to identify done handling of success with has_more - summaryConsumer.accept( null, null ); - } - } - - private ResultSummary extractResultSummary( Map metadata ) - { - long resultAvailableAfter = runResponseHandler.resultAvailableAfter(); - return metadataExtractor.extractSummary( statement, connection, resultAvailableAfter, metadata ); - } - - private void addToRequest( long toAdd ) - { - if ( toAdd <= 0 ) - { - throw new IllegalArgumentException( "Cannot request record amount that is less than or equal to 0. Request amount: " + toAdd ); - } - toRequest += toAdd; - if ( toRequest <= 0 ) // toAdd is already at least 1, we hit buffer overflow - { - toRequest = Long.MAX_VALUE; - } - } - - private void assertRecordAndSummaryConsumerInstalled() - { - if( isFinished() ) - { - // no need to check if we've finished. - return; - } - if( recordConsumer == null || summaryConsumer == null ) - { - throw new IllegalStateException( format("Access record stream without record consumer and/or summary consumer. " + - "Record consumer=%s, Summary consumer=%s", recordConsumer, summaryConsumer) ); - } - } - - private void complete( ResultSummary summary, Throwable error ) - { - // we first inform the summary consumer to ensure when streaming finished, summary is definitely available. - if ( recordConsumer == DISCARD_RECORD_CONSUMER ) - { - // we will report the error to summary if there is no record consumer - summaryConsumer.accept( summary, error ); - } - else - { - // we will not inform the error to summary as the error will be reported to record consumer - summaryConsumer.accept( summary, null ); - } - - // record consumer use (null, null) to identify the end of record stream - recordConsumer.accept( null, error ); - dispose(); - } - - private void dispose() - { - // release the reference to the consumers who hold the reference to subscribers which shall be released when subscription is completed. 
- this.recordConsumer = null; - this.summaryConsumer = null; - } - - protected Status status() - { - return this.status; - } - - protected void status( Status status ) - { - this.status = status; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/BasicPullResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/BasicPullResponseHandler.java deleted file mode 100644 index f0bc3b70..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/BasicPullResponseHandler.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers.pulln; - -import org.reactivestreams.Subscription; - -import java.util.function.BiConsumer; - -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.Record; -import org.neo4j.driver.summary.ResultSummary; - -public interface BasicPullResponseHandler extends ResponseHandler, Subscription -{ - /** - * Register a record consumer for each record received. - * STREAMING shall not be started before this consumer is registered. - * A null record with no error indicates the end of streaming. - * @param recordConsumer register a record consumer to be notified for each record received. - */ - void installRecordConsumer( BiConsumer recordConsumer ); - - /** - * Register a summary consumer to be notified when a summary is received. - * STREAMING shall not be started before this consumer is registered. - * A null summary with no error indicates a SUCCESS message with has_more=true has arrived. - * @param summaryConsumer register a summary consumer - */ - void installSummaryConsumer( BiConsumer summaryConsumer ); - - enum Status - { - DONE, // successfully completed - FAILED, // failed - CANCELED, // canceled - STREAMING, // streaming records - READY // steaming is paused. ready to accept request or cancel commands from user - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/SessionPullResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/SessionPullResponseHandler.java deleted file mode 100644 index 14b6b8a5..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/SessionPullResponseHandler.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
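The BasicPullResponseHandler interface above, together with the state machine table further up, fixes the streaming contract: both consumers must be installed before streaming starts, and a (null, null) pair is the in-band signal for "end of record stream" on the record consumer and for an intermediate SUCCESS with has_more=true on the summary consumer. A minimal sketch of driving such a handler; the concrete instance is assumed to come from a factory such as PullHandlers.newBoltV4PullHandler.

static void stream( BasicPullResponseHandler pullHandler )
{
    pullHandler.installRecordConsumer( ( record, error ) -> {
        if ( record != null )      { /* one record received */ }
        else if ( error != null )  { /* streaming failed */ }
        else                       { /* end of record stream */ }
    } );
    pullHandler.installSummaryConsumer( ( summary, error ) -> {
        if ( summary != null )     { /* streaming finished, summary available */ }
    } );

    pullHandler.request( 100 );    // READY -> STREAMING: writes a PULL message asking for 100 records
    // pullHandler.cancel();       // would write DISCARD and move the handler to CANCELED
}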
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.handlers.pulln; - -import java.util.Map; - -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.MetadataExtractor; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; - -import static java.util.Objects.requireNonNull; - -public class SessionPullResponseHandler extends AbstractBasicPullResponseHandler -{ - private final BookmarksHolder bookmarksHolder; - - public SessionPullResponseHandler( Statement statement, RunResponseHandler runResponseHandler, - Connection connection, BookmarksHolder bookmarksHolder, MetadataExtractor metadataExtractor ) - { - super( statement, runResponseHandler, connection, metadataExtractor ); - this.bookmarksHolder = requireNonNull( bookmarksHolder ); - } - - @Override - protected void afterSuccess( Map metadata ) - { - releaseConnection(); - bookmarksHolder.setBookmarks( metadataExtractor.extractBookmarks( metadata ) ); - } - - @Override - protected void afterFailure( Throwable error ) - { - releaseConnection(); - } - - private void releaseConnection() - { - connection.release(); // release in background - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/TransactionPullResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/TransactionPullResponseHandler.java deleted file mode 100644 index 5944d850..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/handlers/pulln/TransactionPullResponseHandler.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.handlers.pulln; - -import java.util.Map; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.MetadataExtractor; - -import static java.util.Objects.requireNonNull; - -public class TransactionPullResponseHandler extends AbstractBasicPullResponseHandler -{ - private final ExplicitTransaction tx; - - public TransactionPullResponseHandler( Statement statement, RunResponseHandler runResponseHandler, - Connection connection, ExplicitTransaction tx, MetadataExtractor metadataExtractor ) - { - super( statement, runResponseHandler, connection, metadataExtractor ); - this.tx = requireNonNull( tx ); - } - - @Override - protected void afterSuccess( Map metadata ) - { - } - - @Override - protected void afterFailure( Throwable error ) - { - // always mark transaction as terminated because every error is "acknowledged" with a RESET message - // so database forgets about the transaction after the first error - // such transaction should not attempt to commit and can be considered as rolled back - tx.markTerminated(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/ChannelActivityLogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/ChannelActivityLogger.java deleted file mode 100644 index d45b9e47..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/ChannelActivityLogger.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import io.netty.channel.Channel; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.connection.ChannelAttributes; -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.util.Format.valueOrEmpty; - -public class ChannelActivityLogger extends ReformattedLogger -{ - private final Channel channel; - private final String localChannelId; - - private String dbConnectionId; - private String serverAddress; - - public ChannelActivityLogger( Channel channel, Logging logging, Class owner ) - { - this( channel, logging.getLog( owner.getSimpleName() ) ); - } - - private ChannelActivityLogger( Channel channel, Logger delegate ) - { - super( delegate ); - this.channel = channel; - this.localChannelId = channel != null ? 
channel.id().toString() : null; - } - - @Override - protected String reformat( String message ) - { - if ( channel == null ) - { - return message; - } - - String dbConnectionId = getDbConnectionId(); - String serverAddress = getServerAddress(); - - return format( "[0x%s][%s][%s] %s", localChannelId, valueOrEmpty( serverAddress ), valueOrEmpty( dbConnectionId ), message ); - } - - private String getDbConnectionId() - { - if ( dbConnectionId == null ) - { - dbConnectionId = ChannelAttributes.connectionId( channel ); - } - return dbConnectionId; - } - - private String getServerAddress() - { - - if ( serverAddress == null ) - { - BoltServerAddress serverAddress = ChannelAttributes.serverAddress( channel ); - this.serverAddress = serverAddress != null ? serverAddress.toString() : null; - } - - return serverAddress; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/ConsoleLogging.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/ConsoleLogging.java deleted file mode 100644 index a817401c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/ConsoleLogging.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import java.time.LocalDateTime; -import java.util.Objects; -import java.util.logging.ConsoleHandler; -import java.util.logging.Formatter; -import java.util.logging.Handler; -import java.util.logging.Level; -import java.util.logging.LogRecord; - -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE_TIME; - -/** - * Internal implementation of the console logging. - * This class should not be used directly. Please use {@link Logging#console(Level)} factory method instead. 
- * - * @see Logging#console(Level) - */ -public class ConsoleLogging implements Logging -{ - private final Level level; - - public ConsoleLogging( Level level ) - { - this.level = Objects.requireNonNull( level ); - } - - @Override - public Logger getLog( String name ) - { - return new ConsoleLogger( name, level ); - } - - static class ConsoleLogger extends JULogger - { - private final ConsoleHandler handler; - - ConsoleLogger( String name, Level level ) - { - super( name, level ); - java.util.logging.Logger logger = java.util.logging.Logger.getLogger( name ); - - logger.setUseParentHandlers( false ); - // remove all other logging handlers - Handler[] handlers = logger.getHandlers(); - for ( Handler handlerToRemove : handlers ) - { - logger.removeHandler( handlerToRemove ); - } - - handler = new ConsoleHandler(); - handler.setFormatter( new ShortFormatter() ); - handler.setLevel( level ); - logger.addHandler( handler ); - logger.setLevel( level ); - } - } - - private static class ShortFormatter extends Formatter - { - @Override - public String format( LogRecord record ) - { - return LocalDateTime.now().format( ISO_LOCAL_DATE_TIME ) + " " + - record.getLevel() + " " + - record.getLoggerName() + " - " + - formatMessage( record ) + - "\n"; - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/DevNullLogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/DevNullLogger.java deleted file mode 100644 index 09cc2537..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/DevNullLogger.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import org.neo4j.driver.Logger; - -public class DevNullLogger implements Logger -{ - public static final Logger DEV_NULL_LOGGER = new DevNullLogger(); - - private DevNullLogger() - { - } - - @Override - public void error( String message, Throwable cause ) - { - } - - @Override - public void info( String message, Object... params ) - { - } - - @Override - public void warn( String message, Object... params ) - { - } - - @Override - public void warn( String message, Throwable cause ) - { - } - - @Override - public void debug( String message, Object... params ) - { - } - - @Override - public void trace( String message, Object... 
params ) - { - } - - @Override - public boolean isTraceEnabled() - { - return false; - } - - @Override - public boolean isDebugEnabled() - { - return false; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/DevNullLogging.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/DevNullLogging.java deleted file mode 100644 index 90dc0443..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/DevNullLogging.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -public class DevNullLogging implements Logging -{ - public static final Logging DEV_NULL_LOGGING = new DevNullLogging(); - - private DevNullLogging() - { - } - - @Override - public Logger getLog( String name ) - { - return DevNullLogger.DEV_NULL_LOGGER; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/JULogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/JULogger.java deleted file mode 100644 index fb381d51..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/JULogger.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import java.util.logging.Level; - -import org.neo4j.driver.Logger; - -public class JULogger implements Logger -{ - private final java.util.logging.Logger delegate; - private final boolean debugEnabled; - private final boolean traceEnabled; - - public JULogger( String name, Level loggingLevel ) - { - delegate = java.util.logging.Logger.getLogger( name ); - delegate.setLevel( loggingLevel ); - debugEnabled = delegate.isLoggable( Level.FINE ); - traceEnabled = delegate.isLoggable( Level.FINEST ); - } - - @Override - public void error( String message, Throwable cause ) - { - delegate.log( Level.SEVERE, message, cause ); - } - - @Override - public void info( String format, Object... params ) - { - delegate.log( Level.INFO, String.format( format, params ) ); - } - - @Override - public void warn( String format, Object... 
params ) - { - delegate.log( Level.WARNING, String.format( format, params ) ); - } - - @Override - public void warn( String message, Throwable cause ) - { - delegate.log( Level.WARNING, message, cause ); - } - - @Override - public void debug( String format, Object... params ) - { - if( debugEnabled ) - { - delegate.log( Level.FINE, String.format( format, params ) ); - } - } - - @Override - public void trace( String format, Object... params ) - { - if( traceEnabled ) - { - delegate.log( Level.FINEST, String.format( format, params ) ); - } - } - - @Override - public boolean isTraceEnabled() - { - return traceEnabled; - } - - @Override - public boolean isDebugEnabled() - { - return debugEnabled; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/JULogging.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/JULogging.java deleted file mode 100644 index 4bd9d53b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/JULogging.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import java.util.logging.Level; - -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -/** - * Internal implementation of the JUL. - * This class should not be used directly. Please use {@link Logging#javaUtilLogging(Level)} factory method instead. - * - * @see Logging#javaUtilLogging(Level) - */ -public class JULogging implements Logging -{ - private final Level loggingLevel; - - public JULogging( Level loggingLevel ) - { - this.loggingLevel = loggingLevel; - } - - @Override - public Logger getLog( String name ) - { - return new JULogger( name, loggingLevel ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/NettyLogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/NettyLogger.java deleted file mode 100644 index 2938f65d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/NettyLogger.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
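ConsoleLogging, JULogger and JULogging are internal classes; their javadoc points applications at the public Logging factory methods instead. A small sketch of those entry points; the logger name and message are arbitrary examples.

// java.util.logging handlers are configured elsewhere; the driver only sets the level
Logging julBased = Logging.javaUtilLogging( Level.INFO );

// stand-alone console output, formatted by the ShortFormatter shown above
Logging consoleBased = Logging.console( Level.FINE );

Logger log = consoleBased.getLog( "BoltClient" );
log.debug( "Connected to %s", "localhost:7687" );   // driver templates use String.format syntax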
- */ -package org.neo4j.driver.internal.logging; - -import io.netty.util.internal.logging.AbstractInternalLogger; - -import java.util.regex.Pattern; - -import org.neo4j.driver.Logger; - -import static java.lang.String.format; - -public class NettyLogger extends AbstractInternalLogger -{ - private Logger log; - private static final Pattern PLACE_HOLDER_PATTERN = Pattern.compile("\\{\\}"); - - public NettyLogger( String name, Logger log ) - { - super( name ); - this.log = log; - } - - @Override - public boolean isTraceEnabled() - { - return log.isTraceEnabled(); - } - - @Override - public void trace( String msg ) - { - log.trace( msg ); - } - - @Override - public void trace( String format, Object arg ) - { - log.trace( toDriverLoggerFormat( format ), arg ); - } - - @Override - public void trace( String format, Object argA, Object argB ) - { - log.trace( toDriverLoggerFormat( format ), argA, argB ); - } - - @Override - public void trace( String format, Object... arguments ) - { - log.trace( toDriverLoggerFormat( format ), arguments ); - } - - @Override - public void trace( String msg, Throwable t ) - { - log.trace( "%s%n%s", msg, t ); - } - - @Override - public boolean isDebugEnabled() - { - return log.isDebugEnabled(); - } - - @Override - public void debug( String msg ) - { - log.debug( msg ); - } - - @Override - public void debug( String format, Object arg ) - { - log.debug( toDriverLoggerFormat( format ), arg ); - } - - @Override - public void debug( String format, Object argA, Object argB ) - { - log.debug( toDriverLoggerFormat( format ), argA, argB ); - } - - @Override - public void debug( String format, Object... arguments ) - { - log.debug( toDriverLoggerFormat( format ), arguments ); - } - - @Override - public void debug( String msg, Throwable t ) - { - log.debug( "%s%n%s", msg, t ); - } - - @Override - public boolean isInfoEnabled() - { - return true; - } - - @Override - public void info( String msg ) - { - log.info( msg ); - } - - @Override - public void info( String format, Object arg ) - { - log.info( toDriverLoggerFormat( format ), arg ); - } - - @Override - public void info( String format, Object argA, Object argB ) - { - log.info( toDriverLoggerFormat( format ), argA, argB ); - } - - @Override - public void info( String format, Object... arguments ) - { - log.info( toDriverLoggerFormat( format ), arguments ); - } - - @Override - public void info( String msg, Throwable t ) - { - log.info( "%s%n%s", msg, t ); - } - - @Override - public boolean isWarnEnabled() - { - return true; - } - - @Override - public void warn( String msg ) - { - log.warn( msg ); - } - - @Override - public void warn( String format, Object arg ) - { - log.warn( toDriverLoggerFormat( format ), arg ); - } - - @Override - public void warn( String format, Object... 
arguments ) - { - log.warn( toDriverLoggerFormat( format ), arguments ); - } - - @Override - public void warn( String format, Object argA, Object argB ) - { - log.warn( toDriverLoggerFormat( format ), argA, argB ); - } - - @Override - public void warn( String msg, Throwable t ) - { - log.warn( "%s%n%s", msg, t ); - } - - @Override - public boolean isErrorEnabled() - { - return true; - } - - @Override - public void error( String msg ) - { - log.error( msg, null ); - } - - @Override - public void error( String format, Object arg ) - { - error( format, new Object[]{arg} ); - } - - @Override - public void error( String format, Object argA, Object argB ) - { - error( format, new Object[]{argA, argB} ); - } - - @Override - public void error( String format, Object... arguments ) - { - format = toDriverLoggerFormat( format ); - if ( arguments.length == 0 ) - { - log.error( format, null ); - return; - } - - Object arg = arguments[arguments.length - 1]; - if ( arg instanceof Throwable ) - { - // still give all arguments to string format, - // for the worst case, the redundant parameter will be ignored. - log.error( format( format, arguments ), (Throwable) arg ); - } - } - - @Override - public void error( String msg, Throwable t ) - { - log.error( msg, t ); - } - - private String toDriverLoggerFormat( String format ) - { - return PLACE_HOLDER_PATTERN.matcher( format ).replaceAll( "%s" ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/NettyLogging.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/NettyLogging.java deleted file mode 100644 index 7a7770c9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/NettyLogging.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import org.neo4j.driver.Logging; - -/** - * This is the logging factory to delegate netty's logging to our logging system - */ -public class NettyLogging extends InternalLoggerFactory -{ - private Logging logging; - - public NettyLogging( Logging logging ) - { - this.logging = logging; - } - - @Override - protected InternalLogger newInstance( String name ) - { - return new NettyLogger( name, logging.getLog( name ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/PrefixedLogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/PrefixedLogger.java deleted file mode 100644 index fcd3af23..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/PrefixedLogger.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
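NettyLogging plugs the driver's logging into Netty by converting SLF4J-style "{}" placeholders into the String.format-style "%s" templates the driver's Logger expects. The conversion is just the regex substitution from toDriverLoggerFormat, sketched here in isolation with a made-up message template.

Pattern placeHolder = Pattern.compile( "\\{\\}" );   // same pattern as PLACE_HOLDER_PATTERN above

String nettyTemplate = "Channel {} became inactive after {} ms";
String driverTemplate = placeHolder.matcher( nettyTemplate ).replaceAll( "%s" );
// driverTemplate == "Channel %s became inactive after %s ms"

Since NettyLogging extends Netty's InternalLoggerFactory, it can presumably be installed with InternalLoggerFactory.setDefaultFactory(new NettyLogging(logging)) so that Netty's own messages flow through the driver's Logging.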
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import org.neo4j.driver.Logger; - -public class PrefixedLogger extends ReformattedLogger -{ - private final String messagePrefix; - - public PrefixedLogger( Logger delegate ) - { - this( null, delegate ); - } - - public PrefixedLogger( String messagePrefix, Logger delegate ) - { - super(delegate); - this.messagePrefix = messagePrefix; - } - - @Override - protected String reformat( String message ) - { - if ( messagePrefix == null ) - { - return message; - } - return String.format( "%s %s", messagePrefix, message ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/ReformattedLogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/ReformattedLogger.java deleted file mode 100644 index aafd41b3..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/ReformattedLogger.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import org.neo4j.driver.Logger; - -import static java.util.Objects.requireNonNull; - -public abstract class ReformattedLogger implements Logger -{ - private final Logger delegate; - - protected ReformattedLogger(Logger delegate) - { - this.delegate = requireNonNull( delegate ); - } - - @Override - public void error( String message, Throwable cause ) - { - delegate.error( reformat( message ), cause ); - } - - @Override - public void info( String message, Object... params ) - { - delegate.info( reformat( message ), params ); - } - - @Override - public void warn( String message, Object... params ) - { - delegate.warn( reformat( message ), params ); - } - - @Override - public void warn( String message, Throwable cause ) - { - delegate.warn( reformat( message ), cause ); - } - - @Override - public void debug( String message, Object... params ) - { - if ( isDebugEnabled() ) - { - delegate.debug( reformat( message ), params ); - } - } - - @Override - public void trace( String message, Object... 
params ) - { - if ( isTraceEnabled() ) - { - delegate.trace( reformat( message ), params ); - } - } - - @Override - public boolean isTraceEnabled() - { - return delegate.isTraceEnabled(); - } - - @Override - public boolean isDebugEnabled() - { - return delegate.isDebugEnabled(); - } - - protected abstract String reformat( String message ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/Slf4jLogger.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/Slf4jLogger.java deleted file mode 100644 index da5b4773..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/Slf4jLogger.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import java.util.Objects; - -import org.neo4j.driver.Logger; - -class Slf4jLogger implements Logger -{ - private final org.slf4j.Logger delegate; - - Slf4jLogger( org.slf4j.Logger delegate ) - { - this.delegate = Objects.requireNonNull( delegate ); - } - - @Override - public void error( String message, Throwable cause ) - { - if ( delegate.isErrorEnabled() ) - { - delegate.error( message, cause ); - } - } - - @Override - public void info( String message, Object... params ) - { - if ( delegate.isInfoEnabled() ) - { - delegate.info( formatMessage( message, params ) ); - } - } - - @Override - public void warn( String message, Object... params ) - { - if ( delegate.isWarnEnabled() ) - { - delegate.warn( formatMessage( message, params ) ); - } - } - - @Override - public void warn( String message, Throwable cause ) - { - if ( delegate.isWarnEnabled() ) - { - delegate.warn( message, cause ); - } - } - - @Override - public void debug( String message, Object... params ) - { - if ( isDebugEnabled() ) - { - delegate.debug( formatMessage( message, params ) ); - } - } - - @Override - public void trace( String message, Object... params ) - { - if ( isTraceEnabled() ) - { - delegate.trace( formatMessage( message, params ) ); - } - } - - @Override - public boolean isTraceEnabled() - { - return delegate.isTraceEnabled(); - } - - @Override - public boolean isDebugEnabled() - { - return delegate.isDebugEnabled(); - } - - /** - * Creates a fully formatted message. Such formatting is needed because driver uses {@link String#format(String, Object...)} parameters in message - * templates, i.e. '%s' or '%d' while SLF4J uses '{}'. Thus this logger passes fully formatted messages to SLF4J. - * - * @param messageTemplate the message template. - * @param params the parameters. - * @return fully formatted message string. - */ - private static String formatMessage( String messageTemplate, Object... 
params ) - { - return String.format( messageTemplate, params ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/logging/Slf4jLogging.java b/src/graiph-driver/java/org/neo4j/driver/internal/logging/Slf4jLogging.java deleted file mode 100644 index 4d86749a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/logging/Slf4jLogging.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.logging; - -import org.slf4j.LoggerFactory; - -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; - -/** - * Internal implementation of the SLF4J logging. - * This class should not be used directly. Please use {@link Logging#slf4j()} factory method instead. - * - * @see Logging#slf4j() - */ -public class Slf4jLogging implements Logging -{ - @Override - public Logger getLog( String name ) - { - return new Slf4jLogger( LoggerFactory.getLogger( name ) ); - } - - public static RuntimeException checkAvailability() - { - try - { - Class.forName( "org.slf4j.LoggerFactory" ); - return null; - } - catch ( Throwable error ) - { - return new IllegalStateException( - "SLF4J logging is not available. Please add dependencies on slf4j-api and SLF4J binding (Logback, Log4j, etc.)", - error ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/AbstractMessageWriter.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/AbstractMessageWriter.java deleted file mode 100644 index 96584423..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/AbstractMessageWriter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
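The SLF4J adapter works the other way around from NettyLogger: the driver's String.format-style message is fully formatted first and only then handed to the SLF4J delegate, and Slf4jLogging.checkAvailability() guards against a missing binding. A short sketch; the logger name and message are arbitrary examples.

RuntimeException slf4jMissing = Slf4jLogging.checkAvailability();
if ( slf4jMissing == null )
{
    Logger log = new Slf4jLogging().getLog( "org.neo4j.driver" );
    log.info( "Connected to %s in %d ms", "localhost:7687", 35 );   // pre-formatted via String.format
}
else
{
    // no slf4j-api or binding on the classpath; fall back to another Logging implementation
}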
- */ -package org.neo4j.driver.internal.messaging; - -import java.io.IOException; -import java.util.Map; - -import static java.util.Objects.requireNonNull; - -public abstract class AbstractMessageWriter implements MessageFormat.Writer -{ - private final ValuePacker packer; - private final Map encodersByMessageSignature; - - protected AbstractMessageWriter( ValuePacker packer, Map encodersByMessageSignature ) - { - this.packer = requireNonNull( packer ); - this.encodersByMessageSignature = requireNonNull( encodersByMessageSignature ); - } - - @Override - public final void write( Message msg ) throws IOException - { - byte signature = msg.signature(); - MessageEncoder encoder = encodersByMessageSignature.get( signature ); - if ( encoder == null ) - { - throw new IOException( "No encoder found for message " + msg + " with signature " + signature ); - } - encoder.encode( msg, packer ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/BoltProtocol.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/BoltProtocol.java deleted file mode 100644 index dcdb8cbc..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/BoltProtocol.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; -import org.neo4j.driver.*; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.cursor.StatementResultCursorFactory; -import org.neo4j.driver.internal.messaging.v1.BoltProtocolV1; -import org.neo4j.driver.internal.messaging.v2.BoltProtocolV2; -import org.neo4j.driver.internal.messaging.v3.BoltProtocolV3; -import org.neo4j.driver.internal.messaging.v4.BoltProtocolV4; -import org.neo4j.driver.internal.messaging.v5.BoltProtocolV5; -import org.neo4j.driver.internal.spi.Connection; - -import java.util.Map; -import java.util.concurrent.CompletionStage; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.protocolVersion; - -public interface BoltProtocol { - /** - * Instantiate {@link MessageFormat} used by this Bolt protocol verison. - * - * @return new message format. - */ - MessageFormat createMessageFormat(); - - /** - * Initialize channel after it is connected and handshake selected this protocol version. - * - * @param userAgent the user agent string. - * @param authToken the authentication token. - * @param channelInitializedPromise the promise to be notified when initialization is completed. - */ - void initializeChannel(String userAgent, Map authToken, ChannelPromise channelInitializedPromise); - - /** - * Prepare to close channel before it is closed. 
- * - * @param channel the channel to close. - */ - void prepareToCloseChannel(Channel channel); - - /** - * Begin an explicit transaction. - * - * @param connection the connection to use. - * @param bookmarks the bookmarks. Never null, should be {@link Bookmarks#empty()} when absent. - * @param config the transaction configuration. Never null, should be {@link TransactionConfig#empty()} when absent. - * @return a completion stage completed when transaction is started or completed exceptionally when there was a failure. - */ - CompletionStage beginTransaction(Connection connection, Bookmarks bookmarks, TransactionConfig config); - - /** - * Commit the explicit transaction. - * - * @param connection the connection to use. - * @return a completion stage completed with a bookmark when transaction is committed or completed exceptionally when there was a failure. - */ - CompletionStage commitTransaction(Connection connection); - - /** - * Rollback the explicit transaction. - * - * @param connection the connection to use. - * @return a completion stage completed when transaction is rolled back or completed exceptionally when there was a failure. - */ - CompletionStage rollbackTransaction(Connection connection); - - /** - * Execute the given statement in an aut-commit transaction, i.e. {@link Session#run(Statement)}. - * - * @param connection the network connection to use. - * @param statement the cypher to execute. - * @param bookmarksHolder the bookmarksHolder that keeps track of the current bookmark and can be updated with a new bookmark. - * @param config the transaction config for the implicitly started auto-commit transaction. - * @param waitForRunResponse {@code true} for async query execution and {@code false} for blocking query - * execution. Makes returned cursor stage be chained after the RUN response arrives. Needed to have statement - * keys populated. - * @return stage with cursor. - */ - StatementResultCursorFactory runInAutoCommitTransaction(Connection connection, Statement statement, - BookmarksHolder bookmarksHolder, TransactionConfig config, boolean waitForRunResponse); - - /** - * Execute the given statement in a running explicit transaction, i.e. {@link Transaction#run(Statement)}. - * - * @param connection the network connection to use. - * @param statement the cypher to execute. - * @param tx the transaction which executes the query. - * @param waitForRunResponse {@code true} for async query execution and {@code false} for blocking query - * execution. Makes returned cursor stage be chained after the RUN response arrives. Needed to have statement - * keys populated. - * @return stage with cursor. - */ - StatementResultCursorFactory runInExplicitTransaction(Connection connection, Statement statement, ExplicitTransaction tx, - boolean waitForRunResponse); - - /** - * Returns the protocol version. It can be used for version specific error messages. - * - * @return the protocol version. - */ - int version(); - - /** - * Obtain an instance of the protocol for the given channel. - * - * @param channel the channel to get protocol for. - * @return the protocol. - * @throws ClientException when unable to find protocol version for the given channel. - */ - static BoltProtocol forChannel(Channel channel) { - return forVersion(protocolVersion(channel)); - } - - /** - * Obtain an instance of the protocol for the given channel. - * - * @param version the version of the protocol. - * @return the protocol. - * @throws ClientException when unable to find protocol with the given version. 
- */ - static BoltProtocol forVersion(int version) { - switch (version) { - case BoltProtocolV1.VERSION: - return BoltProtocolV1.INSTANCE; - case BoltProtocolV2.VERSION: - return BoltProtocolV2.INSTANCE; - case BoltProtocolV3.VERSION: - return BoltProtocolV3.INSTANCE; - case BoltProtocolV4.VERSION: - return BoltProtocolV4.INSTANCE; - case BoltProtocolV5.VERSION: - return BoltProtocolV5.INSTANCE; - default: - throw new ClientException("Unknown protocol version: " + version); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/Message.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/Message.java deleted file mode 100644 index 5fde6381..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/Message.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging; - -/** - * Base class for all protocol messages. - */ -public interface Message -{ - byte signature(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/MessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/MessageEncoder.java deleted file mode 100644 index cfed994f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/MessageEncoder.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging; - -import java.io.IOException; - -public interface MessageEncoder -{ - void encode( Message message, ValuePacker packer ) throws IOException; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/MessageFormat.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/MessageFormat.java deleted file mode 100644 index 5a8e66bd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/MessageFormat.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging; - -import java.io.IOException; - -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackOutput; - -public interface MessageFormat -{ - interface Writer - { - void write( Message msg ) throws IOException; - } - - interface Reader - { - void read( ResponseMessageHandler handler ) throws IOException; - } - - Writer newWriter( PackOutput output, boolean byteArraySupportEnabled ); - - Reader newReader( PackInput input ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ResponseMessageHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ResponseMessageHandler.java deleted file mode 100644 index 40f7287d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ResponseMessageHandler.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging; - -import java.io.IOException; -import java.util.Map; - -import org.neo4j.driver.Value; - -public interface ResponseMessageHandler -{ - void handleSuccessMessage( Map meta ) throws IOException; - - void handleRecordMessage( Value[] fields ) throws IOException; - - void handleFailureMessage( String code, String message ) throws IOException; - - void handleIgnoredMessage() throws IOException; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ValuePacker.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ValuePacker.java deleted file mode 100644 index 90c3f431..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ValuePacker.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging; - -import java.io.IOException; -import java.util.Map; - -import org.neo4j.driver.Value; - -public interface ValuePacker -{ - void packStructHeader( int size, byte signature ) throws IOException; - - void pack( String string ) throws IOException; - - void pack( Value value ) throws IOException; - - void pack( Map map ) throws IOException; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ValueUnpacker.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ValueUnpacker.java deleted file mode 100644 index c2bb476f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/ValueUnpacker.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging; - -import java.io.IOException; -import java.util.Map; - -import org.neo4j.driver.Value; - -public interface ValueUnpacker -{ - long unpackStructHeader() throws IOException; - - int unpackStructSignature() throws IOException; - - Map unpackMap() throws IOException; - - Value[] unpackArray() throws IOException; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/BeginMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/BeginMessageEncoder.java deleted file mode 100644 index ccc93c97..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/BeginMessageEncoder.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.BeginMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class BeginMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, BeginMessage.class ); - BeginMessage beginMessage = (BeginMessage) message; - packer.packStructHeader( 1, beginMessage.signature() ); - packer.pack( beginMessage.metadata() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/CommitMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/CommitMessageEncoder.java deleted file mode 100644 index 030c4eeb..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/CommitMessageEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.CommitMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class CommitMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, CommitMessage.class ); - packer.packStructHeader( 0, CommitMessage.SIGNATURE ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/DiscardAllMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/DiscardAllMessageEncoder.java deleted file mode 100644 index 3cc11fa9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/DiscardAllMessageEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.DiscardAllMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class DiscardAllMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, DiscardAllMessage.class ); - packer.packStructHeader( 0, DiscardAllMessage.SIGNATURE ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/DiscardMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/DiscardMessageEncoder.java deleted file mode 100644 index 64bed210..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/DiscardMessageEncoder.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.DiscardMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class DiscardMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, DiscardMessage.class ); - packer.packStructHeader( 1, DiscardMessage.SIGNATURE ); - packer.pack( ((DiscardMessage) message).metadata() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/GoodbyeMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/GoodbyeMessageEncoder.java deleted file mode 100644 index d7b6239b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/GoodbyeMessageEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.GoodbyeMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class GoodbyeMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, GoodbyeMessage.class ); - packer.packStructHeader( 0, GoodbyeMessage.SIGNATURE ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/HelloMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/HelloMessageEncoder.java deleted file mode 100644 index 3aca6cee..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/HelloMessageEncoder.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.HelloMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class HelloMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, HelloMessage.class ); - HelloMessage helloMessage = (HelloMessage) message; - packer.packStructHeader( 1, helloMessage.signature() ); - packer.pack( helloMessage.metadata() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/InitMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/InitMessageEncoder.java deleted file mode 100644 index c9c2235a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/InitMessageEncoder.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.InitMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class InitMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, InitMessage.class ); - InitMessage initMessage = (InitMessage) message; - packer.packStructHeader( 2, initMessage.signature() ); - packer.pack( initMessage.userAgent() ); - packer.pack( initMessage.authToken() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/PullAllMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/PullAllMessageEncoder.java deleted file mode 100644 index 3b52d85f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/PullAllMessageEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.PullAllMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class PullAllMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, PullAllMessage.class ); - packer.packStructHeader( 0, PullAllMessage.SIGNATURE ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/PullMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/PullMessageEncoder.java deleted file mode 100644 index 97310038..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/PullMessageEncoder.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.PullMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class PullMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, PullMessage.class ); - packer.packStructHeader( 1, PullMessage.SIGNATURE ); - packer.pack( ((PullMessage) message).metadata() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/ResetMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/ResetMessageEncoder.java deleted file mode 100644 index 3b83d4df..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/ResetMessageEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.ResetMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class ResetMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, ResetMessage.class ); - packer.packStructHeader( 0, ResetMessage.SIGNATURE ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RollbackMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RollbackMessageEncoder.java deleted file mode 100644 index 20d0cfdc..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RollbackMessageEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.RollbackMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class RollbackMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, RollbackMessage.class ); - packer.packStructHeader( 0, RollbackMessage.SIGNATURE ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RunMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RunMessageEncoder.java deleted file mode 100644 index 46f4e0ca..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RunMessageEncoder.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.RunMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class RunMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, RunMessage.class ); - RunMessage runMessage = (RunMessage) message; - packer.packStructHeader( 2, runMessage.signature() ); - packer.pack( runMessage.statement() ); - packer.pack( runMessage.parameters() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RunWithMetadataMessageEncoder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RunWithMetadataMessageEncoder.java deleted file mode 100644 index 877d27cf..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/encode/RunWithMetadataMessageEncoder.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.encode; - -import java.io.IOException; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.request.RunWithMetadataMessage; - -import static org.neo4j.driver.internal.util.Preconditions.checkArgument; - -public class RunWithMetadataMessageEncoder implements MessageEncoder -{ - @Override - public void encode( Message message, ValuePacker packer ) throws IOException - { - checkArgument( message, RunWithMetadataMessage.class ); - RunWithMetadataMessage runMessage = (RunWithMetadataMessage) message; - packer.packStructHeader( 3, runMessage.signature() ); - packer.pack( runMessage.statement() ); - packer.pack( runMessage.parameters() ); - packer.pack( runMessage.metadata() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/AbstractStreamingMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/AbstractStreamingMessage.java deleted file mode 100644 index 02fa7719..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/AbstractStreamingMessage.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; - -import static org.neo4j.driver.internal.util.MetadataExtractor.ABSENT_QUERY_ID; - -public abstract class AbstractStreamingMessage implements Message -{ - private final Map metadata = new HashMap<>(); - public static final long STREAM_LIMIT_UNLIMITED = -1; - - AbstractStreamingMessage( long n, long id ) - { - this.metadata.put( "n", Values.value( n ) ); - if ( id != ABSENT_QUERY_ID ) - { - this.metadata.put( "qid", Values.value( id ) ); - } - } - - public Map metadata() - { - return metadata; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - AbstractStreamingMessage that = (AbstractStreamingMessage) o; - return Objects.equals( metadata, that.metadata ); - } - - protected abstract String name(); - - @Override - public int hashCode() - { - return Objects.hash( metadata ); - } - - @Override - public String toString() - { - return String.format( "%s %s", name(), metadata ); - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/BeginMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/BeginMessage.java deleted file mode 100644 index cf5666e1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/BeginMessage.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.request; - -import java.time.Duration; -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.Bookmarks; - -import static org.neo4j.driver.internal.messaging.request.TransactionMetadataBuilder.buildMetadata; - -public class BeginMessage extends MessageWithMetadata -{ - public static final byte SIGNATURE = 0x11; - - public BeginMessage( Bookmarks bookmarks, TransactionConfig config, String databaseName, AccessMode mode ) - { - this( bookmarks, config.timeout(), config.metadata(), mode, databaseName ); - } - - public BeginMessage( Bookmarks bookmarks, Duration txTimeout, Map txMetadata, AccessMode mode, String databaseName ) - { - super( buildMetadata( txTimeout, txMetadata, databaseName, mode, bookmarks ) ); - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - BeginMessage that = (BeginMessage) o; - return Objects.equals( metadata(), that.metadata() ); - } - - @Override - public int hashCode() - { - return Objects.hash( metadata() ); - } - - @Override - public String toString() - { - return "BEGIN " + metadata(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/CommitMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/CommitMessage.java deleted file mode 100644 index 7e7b343c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/CommitMessage.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import org.neo4j.driver.internal.messaging.Message; - -public class CommitMessage implements Message -{ - public static final byte SIGNATURE = 0x12; - - public static final Message COMMIT = new CommitMessage(); - - private CommitMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "COMMIT"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/DiscardAllMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/DiscardAllMessage.java deleted file mode 100644 index c775ea69..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/DiscardAllMessage.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import org.neo4j.driver.internal.messaging.Message; - -public class DiscardAllMessage implements Message -{ - public final static byte SIGNATURE = 0x2F; - - public static final DiscardAllMessage DISCARD_ALL = new DiscardAllMessage(); - - private DiscardAllMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "DISCARD_ALL"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/DiscardMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/DiscardMessage.java deleted file mode 100644 index 8311dfdf..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/DiscardMessage.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -public class DiscardMessage extends AbstractStreamingMessage -{ - public final static byte SIGNATURE = 0x2F; - - public static DiscardMessage newDiscardAllMessage( long id ) - { - return new DiscardMessage( STREAM_LIMIT_UNLIMITED, id ); - } - - public DiscardMessage( long n, long id ) - { - super( n, id ); - } - - @Override - protected String name() - { - return "DISCARD"; - } - - @Override - public byte signature() - { - return SIGNATURE; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/GoodbyeMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/GoodbyeMessage.java deleted file mode 100644 index e81eacbb..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/GoodbyeMessage.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.request; - -import org.neo4j.driver.internal.messaging.Message; - -public class GoodbyeMessage implements Message -{ - public final static byte SIGNATURE = 0x02; - - public static final GoodbyeMessage GOODBYE = new GoodbyeMessage(); - - private GoodbyeMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "GOODBYE"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/HelloMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/HelloMessage.java deleted file mode 100644 index 796b3968..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/HelloMessage.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.Value; - -import static org.neo4j.driver.Values.value; -import static org.neo4j.driver.internal.security.InternalAuthToken.CREDENTIALS_KEY; - -public class HelloMessage extends MessageWithMetadata -{ - public final static byte SIGNATURE = 0x01; - - private static final String USER_AGENT_METADATA_KEY = "user_agent"; - - public HelloMessage( String userAgent, Map authToken ) - { - super( buildMetadata( userAgent, authToken ) ); - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - HelloMessage that = (HelloMessage) o; - return Objects.equals( metadata(), that.metadata() ); - } - - @Override - public int hashCode() - { - return Objects.hash( metadata() ); - } - - @Override - public String toString() - { - Map metadataCopy = new HashMap<>( metadata() ); - metadataCopy.replace( CREDENTIALS_KEY, value( "******" ) ); - return "HELLO " + metadataCopy; - } - - private static Map buildMetadata( String userAgent, Map authToken ) - { - Map result = new HashMap<>( authToken ); - result.put( USER_AGENT_METADATA_KEY, value( userAgent ) ); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/InitMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/InitMessage.java deleted file mode 100644 index a08e46ac..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/InitMessage.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.util.Map; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.Value; - -import static java.lang.String.format; - -/** - * INIT request message - *

- * Sent by clients to initialize a new connection. Must be sent as the very first message after protocol negotiation. - */ -public class InitMessage implements Message -{ - public final static byte SIGNATURE = 0x01; - - private final String userAgent; - private Map authToken; - - public InitMessage( String userAgent, Map authToken ) - { - this.userAgent = userAgent; - this.authToken = authToken; - } - - public String userAgent() - { - return userAgent; - } - - public Map authToken() - { - return authToken; - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return format( "INIT \"%s\" {...}", userAgent ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { return true; } - if ( o == null || getClass() != o.getClass() ) - { return false; } - - InitMessage that = (InitMessage) o; - - return !(userAgent != null ? !userAgent.equals( that.userAgent ) - : that.userAgent != null); - - } - - @Override - public int hashCode() - { - return userAgent != null ? userAgent.hashCode() : 0; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/MessageWithMetadata.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/MessageWithMetadata.java deleted file mode 100644 index 484d20f8..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/MessageWithMetadata.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.util.Map; - -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.messaging.Message; - -abstract class MessageWithMetadata implements Message -{ - private final Map metadata; - - public MessageWithMetadata( Map metadata ) - { - this.metadata = metadata; - } - - public Map metadata() - { - return metadata; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/MultiDatabaseUtil.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/MultiDatabaseUtil.java deleted file mode 100644 index ea810d7c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/MultiDatabaseUtil.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.util.Objects; - -import org.neo4j.driver.exceptions.ClientException; - -public final class MultiDatabaseUtil -{ - public static final String ABSENT_DB_NAME = ""; - - public static void assertEmptyDatabaseName( String databaseName, int version ) - { - if ( !Objects.equals( ABSENT_DB_NAME, databaseName ) ) - { - throw new ClientException( String.format( "Database name parameter for selecting database is not supported in Bolt Protocol Version %s. " + - "Database name: `%s`", version, databaseName ) ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/PullAllMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/PullAllMessage.java deleted file mode 100644 index f58d8245..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/PullAllMessage.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import org.neo4j.driver.internal.messaging.Message; - -/** - * PULL_ALL request message - *
<p>
- * Sent by clients to pull the entirety of the remaining stream down. - */ -public class PullAllMessage implements Message -{ - public static final byte SIGNATURE = 0x3F; - - public static final PullAllMessage PULL_ALL = new PullAllMessage(); - - private PullAllMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "PULL_ALL"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/PullMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/PullMessage.java deleted file mode 100644 index b6f4774a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/PullMessage.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import static org.neo4j.driver.internal.util.MetadataExtractor.ABSENT_QUERY_ID; - -/** - * PULL request message - *
<p>
- * Sent by clients to pull the entirety of the remaining stream down. - */ -public class PullMessage extends AbstractStreamingMessage -{ - public static final byte SIGNATURE = 0x3F; - public static final PullMessage PULL_ALL = new PullMessage( STREAM_LIMIT_UNLIMITED, ABSENT_QUERY_ID ); - - public PullMessage( long n, long id ) - { - super( n, id ); - } - - @Override - protected String name() - { - return "PULL"; - } - - @Override - public byte signature() - { - return SIGNATURE; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/ResetMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/ResetMessage.java deleted file mode 100644 index dfaab70c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/ResetMessage.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import org.neo4j.driver.internal.messaging.Message; - -/** - * RESET request message - *
<p>
- * Sent by clients to reset a session to a clean state - closing any open transaction or result streams. - * This also acknowledges receipt of failures sent by the server. This is required to - * allow optimistic sending of multiple messages before responses have been received - pipelining. - *
<p>
- * When something goes wrong, we want the server to stop processing our already sent messages, - * but the server cannot tell the difference between what was sent before and after we saw the - * error. - *
<p>
- * This message acts as a barrier after an error, informing the server that we've seen the error - * message, and that messages that follow this one are safe to execute. - */ -public class ResetMessage implements Message -{ - public static final byte SIGNATURE = 0x0F; - - public static final ResetMessage RESET = new ResetMessage(); - - private ResetMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "RESET"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RollbackMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RollbackMessage.java deleted file mode 100644 index 659c6ba0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RollbackMessage.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import org.neo4j.driver.internal.messaging.Message; - -public class RollbackMessage implements Message -{ - public static final byte SIGNATURE = 0x13; - - public static final Message ROLLBACK = new RollbackMessage(); - - private RollbackMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "ROLLBACK"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RunMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RunMessage.java deleted file mode 100644 index 11e589d6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RunMessage.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.util.Collections; -import java.util.Map; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.Value; - -import static java.lang.String.format; - -/** - * RUN request message - *
<p>
- * Sent by clients to start a new Tank job for a given statement and - * parameter set. - */ -public class RunMessage implements Message -{ - public final static byte SIGNATURE = 0x10; - - private final String statement; - private final Map parameters; - - public RunMessage( String statement ) - { - this( statement, Collections.emptyMap() ); - } - - public RunMessage( String statement, Map parameters ) - { - this.statement = statement; - this.parameters = parameters; - } - - public String statement() - { - return statement; - } - - public Map parameters() - { - return parameters; - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return format( "RUN \"%s\" %s", statement, parameters ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - RunMessage that = (RunMessage) o; - - return !(parameters != null ? !parameters.equals( that.parameters ) : that.parameters != null) && - !(statement != null ? !statement.equals( that.statement ) : that.statement != null); - - } - - @Override - public int hashCode() - { - int result = statement != null ? statement.hashCode() : 0; - result = 31 * result + (parameters != null ? parameters.hashCode() : 0); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RunWithMetadataMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RunWithMetadataMessage.java deleted file mode 100644 index d460e489..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/RunWithMetadataMessage.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.request; - -import java.time.Duration; -import java.util.Map; -import java.util.Objects; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.Bookmarks; - -import static java.util.Collections.emptyMap; -import static org.neo4j.driver.Values.ofValue; -import static org.neo4j.driver.internal.messaging.request.TransactionMetadataBuilder.buildMetadata; - -public class RunWithMetadataMessage extends MessageWithMetadata -{ - public final static byte SIGNATURE = 0x10; - - private final String statement; - private final Map parameters; - - public static RunWithMetadataMessage autoCommitTxRunMessage( Statement statement, TransactionConfig config, String databaseName, AccessMode mode, - Bookmarks bookmarks ) - { - return autoCommitTxRunMessage( statement, config.timeout(), config.metadata(), databaseName, mode, bookmarks ); - } - - public static RunWithMetadataMessage autoCommitTxRunMessage( Statement statement, Duration txTimeout, Map txMetadata, String databaseName, - AccessMode mode, Bookmarks bookmarks ) - { - Map metadata = buildMetadata( txTimeout, txMetadata, databaseName, mode, bookmarks ); - return new RunWithMetadataMessage( statement.text(), statement.parameters().asMap( ofValue() ), metadata ); - } - - public static RunWithMetadataMessage explicitTxRunMessage( Statement statement ) - { - return new RunWithMetadataMessage( statement.text(), statement.parameters().asMap( ofValue() ), emptyMap() ); - } - - private RunWithMetadataMessage( String statement, Map parameters, Map metadata ) - { - super( metadata ); - this.statement = statement; - this.parameters = parameters; - } - - public String statement() - { - return statement; - } - - public Map parameters() - { - return parameters; - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - RunWithMetadataMessage that = (RunWithMetadataMessage) o; - return Objects.equals( statement, that.statement ) && Objects.equals( parameters, that.parameters ) && Objects.equals( metadata(), that.metadata() ); - } - - @Override - public int hashCode() - { - return Objects.hash( statement, parameters, metadata() ); - } - - @Override - public String toString() - { - return "RUN \"" + statement + "\" " + parameters + " " + metadata(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/TransactionMetadataBuilder.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/TransactionMetadataBuilder.java deleted file mode 100644 index d6403089..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/request/TransactionMetadataBuilder.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.request; - -import java.time.Duration; -import java.util.Map; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.util.Iterables; - -import static java.util.Collections.emptyMap; -import static org.neo4j.driver.Values.value; -import static org.neo4j.driver.internal.messaging.request.MultiDatabaseUtil.ABSENT_DB_NAME; - -public class TransactionMetadataBuilder -{ - private static final String BOOKMARKS_METADATA_KEY = "bookmarks"; - private static final String DATABASE_NAME_KEY = "db"; - private static final String TX_TIMEOUT_METADATA_KEY = "tx_timeout"; - private static final String TX_METADATA_METADATA_KEY = "tx_metadata"; - private static final String MODE_KEY = "mode"; - private static final String MODE_READ_VALUE = "r"; - - public static Map buildMetadata( Duration txTimeout, Map txMetadata, AccessMode mode, Bookmarks bookmarks ) - { - return buildMetadata( txTimeout, txMetadata, ABSENT_DB_NAME, mode, bookmarks ); - } - - public static Map buildMetadata( Duration txTimeout, Map txMetadata, String databaseName, AccessMode mode, Bookmarks bookmarks ) - { - boolean bookmarksPresent = bookmarks != null && !bookmarks.isEmpty(); - boolean txTimeoutPresent = txTimeout != null; - boolean txMetadataPresent = txMetadata != null && !txMetadata.isEmpty(); - boolean accessModePresent = mode == AccessMode.READ; - boolean databaseNamePresent = databaseName != null && !databaseName.equals( ABSENT_DB_NAME ); - - if ( !bookmarksPresent && !txTimeoutPresent && !txMetadataPresent && !accessModePresent && !databaseNamePresent ) - { - return emptyMap(); - } - - Map result = Iterables.newHashMapWithSize( 5 ); - - if ( bookmarksPresent ) - { - result.put( BOOKMARKS_METADATA_KEY, value( bookmarks.values() ) ); - } - if ( txTimeoutPresent ) - { - result.put( TX_TIMEOUT_METADATA_KEY, value( txTimeout.toMillis() ) ); - } - if ( txMetadataPresent ) - { - result.put( TX_METADATA_METADATA_KEY, value( txMetadata ) ); - } - if( accessModePresent ) - { - result.put( MODE_KEY, value( MODE_READ_VALUE ) ); - } - if ( databaseNamePresent ) // only sent if the database name is different from absent - { - result.put( DATABASE_NAME_KEY, value( databaseName ) ); - } - - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/FailureMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/FailureMessage.java deleted file mode 100644 index a012d1c9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/FailureMessage.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.response; - -import org.neo4j.driver.internal.messaging.Message; - -import static java.lang.String.format; - -/** - * FAILURE response message - *
<p>
- * Sent by the server to signal a failed operation. - * Terminates response sequence. - */ -public class FailureMessage implements Message -{ - public final static byte SIGNATURE = 0x7F; - - private final String code; - private final String message; - - public FailureMessage( String code, String message ) - { - super(); - this.code = code; - this.message = message; - } - - public String code() - { - return code; - } - - public String message() - { - return message; - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return format( "FAILURE %s \"%s\"", code, message ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - FailureMessage that = (FailureMessage) o; - - return !(code != null ? !code.equals( that.code ) : that.code != null) && - !(message != null ? !message.equals( that.message ) : that.message != null); - - } - - @Override - public int hashCode() - { - int result = code != null ? code.hashCode() : 0; - result = 31 * result + (message != null ? message.hashCode() : 0); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/IgnoredMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/IgnoredMessage.java deleted file mode 100644 index 880b5225..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/IgnoredMessage.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.response; - -import org.neo4j.driver.internal.messaging.Message; - -/** - * IGNORED response message - *
<p>
- * Sent by the server to signal that an operation has been ignored. - * Terminates response sequence. - */ -public class IgnoredMessage implements Message -{ - public final static byte SIGNATURE = 0x7E; - - public static final IgnoredMessage IGNORED = new IgnoredMessage(); - - private IgnoredMessage() - { - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "IGNORED {}"; - } - - @Override - public boolean equals( Object obj ) - { - return obj != null && obj.getClass() == getClass(); - } - - @Override - public int hashCode() - { - return 1; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/RecordMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/RecordMessage.java deleted file mode 100644 index 52addcf0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/RecordMessage.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.response; - -import java.util.Arrays; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.Value; - -public class RecordMessage implements Message -{ - public final static byte SIGNATURE = 0x71; - - private final Value[] fields; - - public RecordMessage( Value[] fields ) - { - this.fields = fields; - } - - public Value[] fields() - { - return fields; - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return "RECORD " + Arrays.toString( fields ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - RecordMessage that = (RecordMessage) o; - - return Arrays.equals( fields, that.fields ); - } - - @Override - public int hashCode() - { - return Arrays.hashCode( fields ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/SuccessMessage.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/SuccessMessage.java deleted file mode 100644 index b3cd0b7b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/response/SuccessMessage.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.response; - -import java.util.Map; - -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.Value; - -import static java.lang.String.format; - -/** - * SUCCESS response message - *
<p>
- * Sent by the server to signal a successful operation. - * Terminates response sequence. - */ -public class SuccessMessage implements Message -{ - public final static byte SIGNATURE = 0x70; - - private final Map metadata; - - public SuccessMessage( Map metadata ) - { - this.metadata = metadata; - } - - public Map metadata() - { - return metadata; - } - - @Override - public byte signature() - { - return SIGNATURE; - } - - @Override - public String toString() - { - return format( "SUCCESS %s", metadata ); - } - - @Override - public boolean equals( Object obj ) - { - return obj != null && obj.getClass() == getClass(); - } - - @Override - public int hashCode() - { - return 1; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/BoltProtocolV1.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/BoltProtocolV1.java deleted file mode 100644 index b4d140c0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/BoltProtocolV1.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v1; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.Value; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.cursor.AsyncResultCursorOnlyFactory; -import org.neo4j.driver.internal.cursor.StatementResultCursorFactory; -import org.neo4j.driver.internal.handlers.AbstractPullAllResponseHandler; -import org.neo4j.driver.internal.handlers.BeginTxResponseHandler; -import org.neo4j.driver.internal.handlers.CommitTxResponseHandler; -import org.neo4j.driver.internal.handlers.InitResponseHandler; -import org.neo4j.driver.internal.handlers.NoOpResponseHandler; -import org.neo4j.driver.internal.handlers.PullHandlers; -import org.neo4j.driver.internal.handlers.RollbackTxResponseHandler; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.request.InitMessage; -import org.neo4j.driver.internal.messaging.request.PullAllMessage; -import org.neo4j.driver.internal.messaging.request.RunMessage; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.spi.ResponseHandler; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.internal.util.MetadataExtractor; - -import 
static org.neo4j.driver.Values.ofValue; -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.messageDispatcher; -import static org.neo4j.driver.internal.messaging.request.MultiDatabaseUtil.assertEmptyDatabaseName; - -public class BoltProtocolV1 implements BoltProtocol -{ - public static final int VERSION = 1; - - public static final BoltProtocol INSTANCE = new BoltProtocolV1(); - - public static final MetadataExtractor METADATA_EXTRACTOR = new MetadataExtractor( "result_available_after", "result_consumed_after" ); - - private static final String BEGIN_QUERY = "BEGIN"; - private static final Message BEGIN_MESSAGE = new RunMessage( BEGIN_QUERY ); - private static final Message COMMIT_MESSAGE = new RunMessage( "COMMIT" ); - private static final Message ROLLBACK_MESSAGE = new RunMessage( "ROLLBACK" ); - - @Override - public MessageFormat createMessageFormat() - { - return new MessageFormatV1(); - } - - @Override - public void initializeChannel( String userAgent, Map authToken, ChannelPromise channelInitializedPromise ) - { - Channel channel = channelInitializedPromise.channel(); - - InitMessage message = new InitMessage( userAgent, authToken ); - InitResponseHandler handler = new InitResponseHandler( channelInitializedPromise ); - - messageDispatcher( channel ).enqueue( handler ); - channel.writeAndFlush( message, channel.voidPromise() ); - } - - @Override - public void prepareToCloseChannel( Channel channel ) - { - // left empty on purpose. - } - - @Override - public CompletionStage beginTransaction( Connection connection, Bookmarks bookmarks, TransactionConfig config ) - { - try - { - verifyBeforeTransaction( config, connection.databaseName() ); - } - catch ( Exception error ) - { - return Futures.failedFuture( error ); - } - - if ( bookmarks.isEmpty() ) - { - connection.write( - BEGIN_MESSAGE, NoOpResponseHandler.INSTANCE, - PullAllMessage.PULL_ALL, NoOpResponseHandler.INSTANCE ); - - return Futures.completedWithNull(); - } - else - { - CompletableFuture beginTxFuture = new CompletableFuture<>(); - connection.writeAndFlush( - new RunMessage( BEGIN_QUERY, bookmarks.asBeginTransactionParameters() ), NoOpResponseHandler.INSTANCE, - PullAllMessage.PULL_ALL, new BeginTxResponseHandler( beginTxFuture ) ); - - return beginTxFuture; - } - } - - @Override - public CompletionStage commitTransaction( Connection connection ) - { - CompletableFuture commitFuture = new CompletableFuture<>(); - - ResponseHandler pullAllHandler = new CommitTxResponseHandler( commitFuture ); - connection.writeAndFlush( - COMMIT_MESSAGE, NoOpResponseHandler.INSTANCE, - PullAllMessage.PULL_ALL, pullAllHandler ); - - return commitFuture; - } - - @Override - public CompletionStage rollbackTransaction( Connection connection ) - { - CompletableFuture rollbackFuture = new CompletableFuture<>(); - - ResponseHandler pullAllHandler = new RollbackTxResponseHandler( rollbackFuture ); - connection.writeAndFlush( - ROLLBACK_MESSAGE, NoOpResponseHandler.INSTANCE, - PullAllMessage.PULL_ALL, pullAllHandler ); - - return rollbackFuture; - } - - @Override - public StatementResultCursorFactory runInAutoCommitTransaction( Connection connection, Statement statement, - BookmarksHolder bookmarksHolder, TransactionConfig config, boolean waitForRunResponse ) - { - // bookmarks are ignored for auto-commit transactions in this version of the protocol - verifyBeforeTransaction( config, connection.databaseName() ); - return buildResultCursorFactory( connection, statement, null, waitForRunResponse ); - } - - @Override - public 
StatementResultCursorFactory runInExplicitTransaction( Connection connection, Statement statement, ExplicitTransaction tx, - boolean waitForRunResponse ) - { - return buildResultCursorFactory( connection, statement, tx, waitForRunResponse ); - } - - @Override - public int version() - { - return VERSION; - } - - private static StatementResultCursorFactory buildResultCursorFactory( Connection connection, Statement statement, - ExplicitTransaction tx, boolean waitForRunResponse ) - { - String query = statement.text(); - Map params = statement.parameters().asMap( ofValue() ); - - RunMessage runMessage = new RunMessage( query, params ); - RunResponseHandler runHandler = new RunResponseHandler( METADATA_EXTRACTOR ); - AbstractPullAllResponseHandler pullAllHandler = PullHandlers.newBoltV1PullAllHandler( statement, runHandler, connection, tx ); - - return new AsyncResultCursorOnlyFactory( connection, runMessage, runHandler, pullAllHandler, waitForRunResponse ); - } - - private void verifyBeforeTransaction( TransactionConfig config, String databaseName ) - { - if ( config != null && !config.isEmpty() ) - { - throw txConfigNotSupported(); - } - assertEmptyDatabaseName( databaseName, version() ); - } - - private static ClientException txConfigNotSupported() - { - return new ClientException( "Driver is connected to the database that does not support transaction configuration. " + - "Please upgrade to neo4j 3.5.0 or later in order to use this functionality" ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageFormatV1.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageFormatV1.java deleted file mode 100644 index 76585647..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageFormatV1.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v1; - -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackOutput; - -public class MessageFormatV1 implements MessageFormat -{ - public static final byte NODE = 'N'; - public static final byte RELATIONSHIP = 'R'; - public static final byte UNBOUND_RELATIONSHIP = 'r'; - public static final byte PATH = 'P'; - - public static final int NODE_FIELDS = 3; - - @Override - public MessageFormat.Writer newWriter( PackOutput output, boolean byteArraySupportEnabled ) - { - return new MessageWriterV1( output, byteArraySupportEnabled ); - } - - @Override - public MessageFormat.Reader newReader( PackInput input ) - { - return new MessageReaderV1( input ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageReaderV1.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageReaderV1.java deleted file mode 100644 index 30a411d4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageReaderV1.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v1; - -import java.io.IOException; -import java.util.Map; - -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.ResponseMessageHandler; -import org.neo4j.driver.internal.messaging.ValueUnpacker; -import org.neo4j.driver.internal.messaging.response.FailureMessage; -import org.neo4j.driver.internal.messaging.response.IgnoredMessage; -import org.neo4j.driver.internal.messaging.response.RecordMessage; -import org.neo4j.driver.internal.messaging.response.SuccessMessage; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.Value; - -public class MessageReaderV1 implements MessageFormat.Reader -{ - private final ValueUnpacker unpacker; - - public MessageReaderV1( PackInput input ) - { - this( new ValueUnpackerV1( input ) ); - } - - protected MessageReaderV1( ValueUnpacker unpacker ) - { - this.unpacker = unpacker; - } - - @Override - public void read( ResponseMessageHandler handler ) throws IOException - { - unpacker.unpackStructHeader(); - int type = unpacker.unpackStructSignature(); - switch ( type ) - { - case SuccessMessage.SIGNATURE: - unpackSuccessMessage( handler ); - break; - case FailureMessage.SIGNATURE: - unpackFailureMessage( handler ); - break; - case IgnoredMessage.SIGNATURE: - unpackIgnoredMessage( handler ); - break; - case RecordMessage.SIGNATURE: - unpackRecordMessage( handler ); - break; - default: - throw new IOException( "Unknown message type: " + type ); - } - } - - private void unpackSuccessMessage( ResponseMessageHandler output ) throws IOException - { - Map map = unpacker.unpackMap(); - output.handleSuccessMessage( map ); - } - - private void unpackFailureMessage( ResponseMessageHandler output ) throws IOException - { - Map params = unpacker.unpackMap(); - String code = params.get( "code" ).asString(); - String message = params.get( "message" ).asString(); - output.handleFailureMessage( code, message ); - } - - private void unpackIgnoredMessage( ResponseMessageHandler output ) throws IOException - { - output.handleIgnoredMessage(); - } - - private void unpackRecordMessage( ResponseMessageHandler output ) throws IOException - { - Value[] fields = unpacker.unpackArray(); - output.handleRecordMessage( fields ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageWriterV1.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageWriterV1.java deleted file mode 100644 index 5aa8c345..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/MessageWriterV1.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v1; - -import java.util.Map; - -import org.neo4j.driver.internal.messaging.AbstractMessageWriter; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.messaging.encode.DiscardAllMessageEncoder; -import org.neo4j.driver.internal.messaging.encode.InitMessageEncoder; -import org.neo4j.driver.internal.messaging.encode.PullAllMessageEncoder; -import org.neo4j.driver.internal.messaging.encode.ResetMessageEncoder; -import org.neo4j.driver.internal.messaging.encode.RunMessageEncoder; -import org.neo4j.driver.internal.messaging.request.DiscardAllMessage; -import org.neo4j.driver.internal.messaging.request.InitMessage; -import org.neo4j.driver.internal.messaging.request.PullAllMessage; -import org.neo4j.driver.internal.messaging.request.ResetMessage; -import org.neo4j.driver.internal.messaging.request.RunMessage; -import org.neo4j.driver.internal.packstream.PackOutput; -import org.neo4j.driver.internal.util.Iterables; - -public class MessageWriterV1 extends AbstractMessageWriter -{ - public MessageWriterV1( PackOutput output, boolean byteArraySupportEnabled ) - { - this( new ValuePackerV1( output, byteArraySupportEnabled ) ); - } - - protected MessageWriterV1( ValuePacker packer ) - { - super( packer, buildEncoders() ); - } - - private static Map buildEncoders() - { - Map result = Iterables.newHashMapWithSize( 6 ); - result.put( DiscardAllMessage.SIGNATURE, new DiscardAllMessageEncoder() ); - result.put( InitMessage.SIGNATURE, new InitMessageEncoder() ); - result.put( PullAllMessage.SIGNATURE, new PullAllMessageEncoder() ); - result.put( ResetMessage.SIGNATURE, new ResetMessageEncoder() ); - result.put( RunMessage.SIGNATURE, new RunMessageEncoder() ); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/ValuePackerV1.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/ValuePackerV1.java deleted file mode 100644 index d311ee22..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v1/ValuePackerV1.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v1; - -import java.io.IOException; -import java.util.Map; - -import org.neo4j.driver.internal.messaging.ValuePacker; -import org.neo4j.driver.internal.packstream.PackOutput; -import org.neo4j.driver.internal.packstream.PackStream; -import org.neo4j.driver.internal.value.InternalValue; -import org.neo4j.driver.Value; - -public class ValuePackerV1 implements ValuePacker -{ - protected final PackStream.Packer packer; - - private final boolean byteArraySupportEnabled; - - public ValuePackerV1( PackOutput output, boolean byteArraySupportEnabled ) - { - this.packer = new PackStream.Packer( output ); - this.byteArraySupportEnabled = byteArraySupportEnabled; - } - - @Override - public final void packStructHeader( int size, byte signature ) throws IOException - { - packer.packStructHeader( size, signature ); - } - - @Override - public final void pack( String string ) throws IOException - { - packer.pack( string ); - } - - @Override - public final void pack( Value value ) throws IOException - { - if ( value instanceof InternalValue ) - { - packInternalValue( ((InternalValue) value) ); - } - else - { - throw new IllegalArgumentException( "Unable to pack: " + value ); - } - } - - @Override - public final void pack( Map map ) throws IOException - { - if ( map == null || map.size() == 0 ) - { - packer.packMapHeader( 0 ); - return; - } - packer.packMapHeader( map.size() ); - for ( Map.Entry entry : map.entrySet() ) - { - packer.pack( entry.getKey() ); - pack( entry.getValue() ); - } - } - - protected void packInternalValue( InternalValue value ) throws IOException - { - switch ( value.typeConstructor() ) - { - case NULL: - packer.packNull(); - break; - - case BYTES: - if ( !byteArraySupportEnabled ) - { - throw new PackStream.UnPackable( - "Packing bytes is not supported as the current server this driver connected to does not support unpack bytes." ); - } - packer.pack( value.asByteArray() ); - break; - - case STRING: - packer.pack( value.asString() ); - break; - - case BOOLEAN: - packer.pack( value.asBoolean() ); - break; - - case INTEGER: - packer.pack( value.asLong() ); - break; - - case FLOAT: - packer.pack( value.asDouble() ); - break; - - case MAP: - packer.packMapHeader( value.size() ); - for ( String s : value.keys() ) - { - packer.pack( s ); - pack( value.get( s ) ); - } - break; - - case LIST: - packer.packListHeader( value.size() ); - for ( Value item : value.values() ) - { - pack( item ); - } - break; - - default: - throw new IOException( "Unknown type: " + value.type().name() ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/BoltProtocolV2.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/BoltProtocolV2.java deleted file mode 100644 index 2a376c99..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/BoltProtocolV2.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v2; - -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.v1.BoltProtocolV1; - -public class BoltProtocolV2 extends BoltProtocolV1 -{ - public static final int VERSION = 2; - - public static final BoltProtocol INSTANCE = new BoltProtocolV2(); - - @Override - public MessageFormat createMessageFormat() - { - return new MessageFormatV2(); - } - - @Override - public int version() - { - return VERSION; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageFormatV2.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageFormatV2.java deleted file mode 100644 index 3d91abda..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageFormatV2.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v2; - -import org.neo4j.driver.internal.messaging.v1.MessageFormatV1; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackOutput; - -public class MessageFormatV2 extends MessageFormatV1 -{ - public static final byte DATE = 'D'; - public static final int DATE_STRUCT_SIZE = 1; - - public static final byte TIME = 'T'; - public static final int TIME_STRUCT_SIZE = 2; - - public static final byte LOCAL_TIME = 't'; - public static final int LOCAL_TIME_STRUCT_SIZE = 1; - - public static final byte LOCAL_DATE_TIME = 'd'; - public static final int LOCAL_DATE_TIME_STRUCT_SIZE = 2; - - public static final byte DATE_TIME_WITH_ZONE_OFFSET = 'F'; - public static final byte DATE_TIME_WITH_ZONE_ID = 'f'; - public static final int DATE_TIME_STRUCT_SIZE = 3; - - public static final byte DURATION = 'E'; - public static final int DURATION_TIME_STRUCT_SIZE = 4; - - public static final byte POINT_2D_STRUCT_TYPE = 'X'; - public static final int POINT_2D_STRUCT_SIZE = 3; - - public static final byte POINT_3D_STRUCT_TYPE = 'Y'; - public static final int POINT_3D_STRUCT_SIZE = 4; - - @Override - public Writer newWriter( PackOutput output, boolean byteArraySupportEnabled ) - { - if ( !byteArraySupportEnabled ) - { - throw new IllegalArgumentException( "Bolt V2 should support byte arrays" ); - } - return new MessageWriterV2( output ); - } - - @Override - public Reader newReader( PackInput input ) - { - return new MessageReaderV2( input ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageReaderV2.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageReaderV2.java deleted file mode 100644 index 21d2113e..00000000 --- 
a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageReaderV2.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v2; - -import org.neo4j.driver.internal.messaging.v1.MessageReaderV1; -import org.neo4j.driver.internal.packstream.PackInput; - -public class MessageReaderV2 extends MessageReaderV1 -{ - public MessageReaderV2( PackInput input ) - { - super( new ValueUnpackerV2( input ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageWriterV2.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageWriterV2.java deleted file mode 100644 index 7bd300ed..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/MessageWriterV2.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v2; - -import org.neo4j.driver.internal.messaging.v1.MessageWriterV1; -import org.neo4j.driver.internal.packstream.PackOutput; - -public class MessageWriterV2 extends MessageWriterV1 -{ - public MessageWriterV2( PackOutput output ) - { - super( new ValuePackerV2( output ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/ValueUnpackerV2.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/ValueUnpackerV2.java deleted file mode 100644 index 968f41c6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v2/ValueUnpackerV2.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v2; - -import java.io.IOException; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.OffsetTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; - -import org.neo4j.driver.internal.messaging.v1.ValueUnpackerV1; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.types.TypeConstructor; -import org.neo4j.driver.Value; - -import static java.time.ZoneOffset.UTC; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DATE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DATE_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DATE_TIME_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DATE_TIME_WITH_ZONE_ID; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DATE_TIME_WITH_ZONE_OFFSET; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DURATION; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.DURATION_TIME_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.LOCAL_DATE_TIME; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.LOCAL_DATE_TIME_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.LOCAL_TIME; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.LOCAL_TIME_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.POINT_2D_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.POINT_2D_STRUCT_TYPE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.POINT_3D_STRUCT_SIZE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.POINT_3D_STRUCT_TYPE; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.TIME; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.TIME_STRUCT_SIZE; -import static org.neo4j.driver.Values.isoDuration; -import static org.neo4j.driver.Values.point; -import static org.neo4j.driver.Values.value; - -public class ValueUnpackerV2 extends ValueUnpackerV1 -{ - public ValueUnpackerV2( PackInput input ) - { - super( input ); - } - - @Override - protected Value unpackStruct( long size, byte type ) throws IOException - { - switch ( type ) - { - case DATE: - ensureCorrectStructSize( TypeConstructor.DATE, DATE_STRUCT_SIZE, size ); - return unpackDate(); - case TIME: - ensureCorrectStructSize( TypeConstructor.TIME, TIME_STRUCT_SIZE, size ); - return unpackTime(); - case LOCAL_TIME: - ensureCorrectStructSize( TypeConstructor.LOCAL_TIME, LOCAL_TIME_STRUCT_SIZE, size ); - return unpackLocalTime(); - case LOCAL_DATE_TIME: - ensureCorrectStructSize( TypeConstructor.LOCAL_DATE_TIME, LOCAL_DATE_TIME_STRUCT_SIZE, size ); - return unpackLocalDateTime(); - case DATE_TIME_WITH_ZONE_OFFSET: - ensureCorrectStructSize( TypeConstructor.DATE_TIME, DATE_TIME_STRUCT_SIZE, size ); - return unpackDateTimeWithZoneOffset(); - case DATE_TIME_WITH_ZONE_ID: - ensureCorrectStructSize( TypeConstructor.DATE_TIME, DATE_TIME_STRUCT_SIZE, size ); - return unpackDateTimeWithZoneId(); - case DURATION: - ensureCorrectStructSize( TypeConstructor.DURATION, DURATION_TIME_STRUCT_SIZE, size ); - return unpackDuration(); - case POINT_2D_STRUCT_TYPE: - ensureCorrectStructSize( TypeConstructor.POINT, POINT_2D_STRUCT_SIZE, size ); - 
return unpackPoint2D(); - case POINT_3D_STRUCT_TYPE: - ensureCorrectStructSize( TypeConstructor.POINT, POINT_3D_STRUCT_SIZE, size ); - return unpackPoint3D(); - default: - return super.unpackStruct( size, type ); - } - } - - private Value unpackDate() throws IOException - { - long epochDay = unpacker.unpackLong(); - return value( LocalDate.ofEpochDay( epochDay ) ); - } - - private Value unpackTime() throws IOException - { - long nanoOfDayLocal = unpacker.unpackLong(); - int offsetSeconds = Math.toIntExact( unpacker.unpackLong() ); - - LocalTime localTime = LocalTime.ofNanoOfDay( nanoOfDayLocal ); - ZoneOffset offset = ZoneOffset.ofTotalSeconds( offsetSeconds ); - return value( OffsetTime.of( localTime, offset ) ); - } - - private Value unpackLocalTime() throws IOException - { - long nanoOfDayLocal = unpacker.unpackLong(); - return value( LocalTime.ofNanoOfDay( nanoOfDayLocal ) ); - } - - private Value unpackLocalDateTime() throws IOException - { - long epochSecondUtc = unpacker.unpackLong(); - int nano = Math.toIntExact( unpacker.unpackLong() ); - return value( LocalDateTime.ofEpochSecond( epochSecondUtc, nano, UTC ) ); - } - - private Value unpackDateTimeWithZoneOffset() throws IOException - { - long epochSecondLocal = unpacker.unpackLong(); - int nano = Math.toIntExact( unpacker.unpackLong() ); - int offsetSeconds = Math.toIntExact( unpacker.unpackLong() ); - return value( newZonedDateTime( epochSecondLocal, nano, ZoneOffset.ofTotalSeconds( offsetSeconds ) ) ); - } - - private Value unpackDateTimeWithZoneId() throws IOException - { - long epochSecondLocal = unpacker.unpackLong(); - int nano = Math.toIntExact( unpacker.unpackLong() ); - String zoneIdString = unpacker.unpackString(); - return value( newZonedDateTime( epochSecondLocal, nano, ZoneId.of( zoneIdString ) ) ); - } - - private Value unpackDuration() throws IOException - { - long months = unpacker.unpackLong(); - long days = unpacker.unpackLong(); - long seconds = unpacker.unpackLong(); - int nanoseconds = Math.toIntExact( unpacker.unpackLong() ); - return isoDuration( months, days, seconds, nanoseconds ); - } - - private Value unpackPoint2D() throws IOException - { - int srid = Math.toIntExact( unpacker.unpackLong() ); - double x = unpacker.unpackDouble(); - double y = unpacker.unpackDouble(); - return point( srid, x, y ); - } - - private Value unpackPoint3D() throws IOException - { - int srid = Math.toIntExact( unpacker.unpackLong() ); - double x = unpacker.unpackDouble(); - double y = unpacker.unpackDouble(); - double z = unpacker.unpackDouble(); - return point( srid, x, y, z ); - } - - private static ZonedDateTime newZonedDateTime( long epochSecondLocal, long nano, ZoneId zoneId ) - { - Instant instant = Instant.ofEpochSecond( epochSecondLocal, nano ); - LocalDateTime localDateTime = LocalDateTime.ofInstant( instant, UTC ); - return ZonedDateTime.of( localDateTime, zoneId ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/BoltProtocolV3.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/BoltProtocolV3.java deleted file mode 100644 index 1fa439f5..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/BoltProtocolV3.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v3; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.cursor.AsyncResultCursorOnlyFactory; -import org.neo4j.driver.internal.cursor.StatementResultCursorFactory; -import org.neo4j.driver.internal.handlers.AbstractPullAllResponseHandler; -import org.neo4j.driver.internal.handlers.BeginTxResponseHandler; -import org.neo4j.driver.internal.handlers.CommitTxResponseHandler; -import org.neo4j.driver.internal.handlers.HelloResponseHandler; -import org.neo4j.driver.internal.handlers.NoOpResponseHandler; -import org.neo4j.driver.internal.handlers.RollbackTxResponseHandler; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.request.BeginMessage; -import org.neo4j.driver.internal.messaging.request.GoodbyeMessage; -import org.neo4j.driver.internal.messaging.request.HelloMessage; -import org.neo4j.driver.internal.messaging.request.RunWithMetadataMessage; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.internal.util.MetadataExtractor; - -import static org.neo4j.driver.internal.async.connection.ChannelAttributes.messageDispatcher; -import static org.neo4j.driver.internal.handlers.PullHandlers.newBoltV3PullAllHandler; -import static org.neo4j.driver.internal.messaging.request.CommitMessage.COMMIT; -import static org.neo4j.driver.internal.messaging.request.MultiDatabaseUtil.assertEmptyDatabaseName; -import static org.neo4j.driver.internal.messaging.request.RollbackMessage.ROLLBACK; -import static org.neo4j.driver.internal.messaging.request.RunWithMetadataMessage.autoCommitTxRunMessage; -import static org.neo4j.driver.internal.messaging.request.RunWithMetadataMessage.explicitTxRunMessage; - -public class BoltProtocolV3 implements BoltProtocol -{ - public static final int VERSION = 3; - - public static final BoltProtocol INSTANCE = new BoltProtocolV3(); - - public static final MetadataExtractor METADATA_EXTRACTOR = new MetadataExtractor( "t_first", "t_last" ); - - @Override - public MessageFormat createMessageFormat() - { - return new MessageFormatV3(); - } - - @Override - public void initializeChannel( String userAgent, Map authToken, ChannelPromise channelInitializedPromise ) - { - Channel channel = channelInitializedPromise.channel(); - - HelloMessage message = new HelloMessage( userAgent, authToken ); - HelloResponseHandler handler = new HelloResponseHandler( channelInitializedPromise ); - - messageDispatcher( channel ).enqueue( handler ); - 
channel.writeAndFlush( message, channel.voidPromise() ); - } - - @Override - public void prepareToCloseChannel( Channel channel ) - { - GoodbyeMessage message = GoodbyeMessage.GOODBYE; - messageDispatcher( channel ).enqueue( NoOpResponseHandler.INSTANCE ); - channel.writeAndFlush( message, channel.voidPromise() ); - } - - @Override - public CompletionStage beginTransaction( Connection connection, Bookmarks bookmarks, TransactionConfig config ) - { - try - { - verifyDatabaseNameBeforeTransaction( connection.databaseName() ); - } - catch ( Exception error ) - { - return Futures.failedFuture( error ); - } - - BeginMessage beginMessage = new BeginMessage( bookmarks, config, connection.databaseName(), connection.mode() ); - - if ( bookmarks.isEmpty() ) - { - connection.write( beginMessage, NoOpResponseHandler.INSTANCE ); - return Futures.completedWithNull(); - } - else - { - CompletableFuture beginTxFuture = new CompletableFuture<>(); - connection.writeAndFlush( beginMessage, new BeginTxResponseHandler( beginTxFuture ) ); - return beginTxFuture; - } - } - - @Override - public CompletionStage commitTransaction( Connection connection ) - { - CompletableFuture commitFuture = new CompletableFuture<>(); - connection.writeAndFlush( COMMIT, new CommitTxResponseHandler( commitFuture ) ); - return commitFuture; - } - - @Override - public CompletionStage rollbackTransaction( Connection connection ) - { - CompletableFuture rollbackFuture = new CompletableFuture<>(); - connection.writeAndFlush( ROLLBACK, new RollbackTxResponseHandler( rollbackFuture ) ); - return rollbackFuture; - } - - @Override - public StatementResultCursorFactory runInAutoCommitTransaction( Connection connection, Statement statement, - BookmarksHolder bookmarksHolder, TransactionConfig config, boolean waitForRunResponse ) - { - verifyDatabaseNameBeforeTransaction( connection.databaseName() ); - RunWithMetadataMessage runMessage = - autoCommitTxRunMessage( statement, config, connection.databaseName(), connection.mode(), bookmarksHolder.getBookmarks() ); - return buildResultCursorFactory( connection, statement, bookmarksHolder, null, runMessage, waitForRunResponse ); - } - - @Override - public StatementResultCursorFactory runInExplicitTransaction( Connection connection, Statement statement, ExplicitTransaction tx, - boolean waitForRunResponse ) - { - RunWithMetadataMessage runMessage = explicitTxRunMessage( statement ); - return buildResultCursorFactory( connection, statement, BookmarksHolder.NO_OP, tx, runMessage, waitForRunResponse ); - } - - protected StatementResultCursorFactory buildResultCursorFactory( Connection connection, Statement statement, BookmarksHolder bookmarksHolder, - ExplicitTransaction tx, RunWithMetadataMessage runMessage, boolean waitForRunResponse ) - { - RunResponseHandler runHandler = new RunResponseHandler( METADATA_EXTRACTOR ); - AbstractPullAllResponseHandler pullHandler = newBoltV3PullAllHandler( statement, runHandler, connection, bookmarksHolder, tx ); - - return new AsyncResultCursorOnlyFactory( connection, runMessage, runHandler, pullHandler, waitForRunResponse ); - } - - protected void verifyDatabaseNameBeforeTransaction( String databaseName ) - { - assertEmptyDatabaseName( databaseName, version() ); - } - - @Override - public int version() - { - return VERSION; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/MessageFormatV3.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/MessageFormatV3.java deleted file mode 100644 index 91e34542..00000000 --- 
a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v3/MessageFormatV3.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v3; - -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.v2.MessageReaderV2; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackOutput; - -public class MessageFormatV3 implements MessageFormat -{ - @Override - public Writer newWriter( PackOutput output, boolean byteArraySupportEnabled ) - { - return new MessageWriterV3( output ); - } - - @Override - public Reader newReader( PackInput input ) - { - return new MessageReaderV2( input ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/BoltProtocolV4.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/BoltProtocolV4.java deleted file mode 100644 index 9a2b33b2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/BoltProtocolV4.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v4; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.internal.BookmarksHolder; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.cursor.InternalStatementResultCursorFactory; -import org.neo4j.driver.internal.cursor.StatementResultCursorFactory; -import org.neo4j.driver.internal.handlers.AbstractPullAllResponseHandler; -import org.neo4j.driver.internal.handlers.RunResponseHandler; -import org.neo4j.driver.internal.handlers.pulln.BasicPullResponseHandler; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.request.RunWithMetadataMessage; -import org.neo4j.driver.internal.messaging.v3.BoltProtocolV3; -import org.neo4j.driver.internal.spi.Connection; - -import static org.neo4j.driver.internal.handlers.PullHandlers.newBoltV3PullAllHandler; -import static org.neo4j.driver.internal.handlers.PullHandlers.newBoltV4PullHandler; - -public class BoltProtocolV4 extends BoltProtocolV3 -{ - public static final int VERSION = 4; - public static final BoltProtocol INSTANCE = new BoltProtocolV4(); - - @Override - public MessageFormat createMessageFormat() - { - return new MessageFormatV4(); - } - - @Override - protected StatementResultCursorFactory buildResultCursorFactory( Connection connection, Statement statement, BookmarksHolder bookmarksHolder, - ExplicitTransaction tx, RunWithMetadataMessage runMessage, boolean waitForRunResponse ) - { - RunResponseHandler runHandler = new RunResponseHandler( METADATA_EXTRACTOR ); - - AbstractPullAllResponseHandler pullAllHandler = newBoltV3PullAllHandler( statement, runHandler, connection, bookmarksHolder, tx ); - BasicPullResponseHandler pullHandler = newBoltV4PullHandler( statement, runHandler, connection, bookmarksHolder, tx ); - - return new InternalStatementResultCursorFactory( connection, runMessage, runHandler, pullHandler, pullAllHandler, waitForRunResponse ); - } - - protected void verifyDatabaseNameBeforeTransaction( String databaseName ) - { - // Bolt V4 accepts database name - } - - @Override - public int version() - { - return VERSION; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/MessageFormatV4.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/MessageFormatV4.java deleted file mode 100644 index 8524d52c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v4/MessageFormatV4.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v4; - -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.v2.MessageReaderV2; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackOutput; - -public class MessageFormatV4 implements MessageFormat -{ - @Override - public Writer newWriter( PackOutput output, boolean byteArraySupportEnabled ) - { - return new MessageWriterV4( output ); - } - - @Override - public Reader newReader( PackInput input ) - { - return new MessageReaderV2( input ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/BoltProtocolV5.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/BoltProtocolV5.java deleted file mode 100644 index c3ae54e9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/BoltProtocolV5.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v5; - -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.v4.BoltProtocolV4; - -public class BoltProtocolV5 extends BoltProtocolV4 { - public static final int VERSION = 5; - public static final BoltProtocol INSTANCE = new BoltProtocolV5(); - - @Override - public MessageFormat createMessageFormat() { - return new MessageFormatV5(); - } - - @Override - public int version() { - return VERSION; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageFormatV5.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageFormatV5.java deleted file mode 100644 index 72d86ecd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageFormatV5.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v5; - -import org.neo4j.driver.internal.messaging.MessageFormat; -import org.neo4j.driver.internal.messaging.v2.MessageReaderV2; -import org.neo4j.driver.internal.messaging.v4.MessageWriterV4; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackOutput; - -public class MessageFormatV5 implements MessageFormat -{ - @Override - public Writer newWriter( PackOutput output, boolean byteArraySupportEnabled ) - { - return new MessageWriterV5( output ); - } - - @Override - public Reader newReader( PackInput input ) - { - return new MessageReaderV5( input ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageReaderV5.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageReaderV5.java deleted file mode 100644 index 485a9c49..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageReaderV5.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v5; - -import org.neo4j.driver.internal.messaging.v1.MessageReaderV1; -import org.neo4j.driver.internal.messaging.v2.ValueUnpackerV2; -import org.neo4j.driver.internal.packstream.PackInput; - -public class MessageReaderV5 extends MessageReaderV1 -{ - public MessageReaderV5( PackInput input ) - { - super( new ValueUnpackerV5( input ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageWriterV5.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageWriterV5.java deleted file mode 100644 index 9af3771f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/MessageWriterV5.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v5; - -import cn.graiph.blob.BlobMessageSignature; -import org.neo4j.driver.internal.GetBlobMessageEncoder; -import org.neo4j.driver.internal.messaging.AbstractMessageWriter; -import org.neo4j.driver.internal.messaging.MessageEncoder; -import org.neo4j.driver.internal.messaging.encode.*; -import org.neo4j.driver.internal.messaging.request.*; -import org.neo4j.driver.internal.packstream.PackOutput; -import org.neo4j.driver.internal.util.Iterables; - -import java.util.Map; - -public class MessageWriterV5 extends AbstractMessageWriter -{ - public MessageWriterV5(PackOutput output ) - { - super( new ValuePackerV5( output ), buildEncoders() ); - } - - private static Map buildEncoders() - { - Map result = Iterables.newHashMapWithSize( 9 + 1 ); - result.put( HelloMessage.SIGNATURE, new HelloMessageEncoder() ); - result.put( GoodbyeMessage.SIGNATURE, new GoodbyeMessageEncoder() ); - result.put( RunWithMetadataMessage.SIGNATURE, new RunWithMetadataMessageEncoder() ); - - result.put( DiscardMessage.SIGNATURE, new DiscardMessageEncoder() ); // new - result.put( PullMessage.SIGNATURE, new PullMessageEncoder() ); // new - - //GetBlobMessageEncoder - result.put(BlobMessageSignature.SIGNATURE_GET_BLOB(), new GetBlobMessageEncoder() ); // new - - result.put( BeginMessage.SIGNATURE, new BeginMessageEncoder() ); - result.put( CommitMessage.SIGNATURE, new CommitMessageEncoder() ); - result.put( RollbackMessage.SIGNATURE, new RollbackMessageEncoder() ); - - result.put( ResetMessage.SIGNATURE, new ResetMessageEncoder() ); - return result; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/ValuePackerV5.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/ValuePackerV5.java deleted file mode 100644 index 33003f31..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/ValuePackerV5.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.messaging.v5; - -import org.neo4j.driver.internal.messaging.v2.ValuePackerV2; -import org.neo4j.driver.internal.packstream.PackOutput; -import org.neo4j.driver.internal.types.TypeConstructor; -import org.neo4j.driver.internal.util.BoltClientBlobIO; -import org.neo4j.driver.internal.value.InternalValue; - -import java.io.IOException; - -public class ValuePackerV5 extends ValuePackerV2 { - public ValuePackerV5(PackOutput output) { - super(output); - } - - @Override - protected void packInternalValue(InternalValue value) throws IOException { - TypeConstructor typeConstructor = value.typeConstructor(); - - //blob - if (TypeConstructor.BLOB == typeConstructor) { - BoltClientBlobIO.packBlob(value.asBlob(), packer); - return; - } - - super.packInternalValue(value); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/ValueUnpackerV5.java b/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/ValueUnpackerV5.java deleted file mode 100644 index 77bd426a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/messaging/v5/ValueUnpackerV5.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.messaging.v5; - -import org.neo4j.driver.Value; -import org.neo4j.driver.internal.messaging.v1.ValueUnpackerV1; -import org.neo4j.driver.internal.messaging.v2.ValueUnpackerV2; -import org.neo4j.driver.internal.packstream.PackInput; -import org.neo4j.driver.internal.packstream.PackType; -import org.neo4j.driver.internal.types.TypeConstructor; -import org.neo4j.driver.internal.util.BoltClientBlobIO; -import org.neo4j.driver.internal.value.ListValue; -import org.neo4j.driver.internal.value.MapValue; - -import java.io.IOException; -import java.time.*; - -import static java.time.ZoneOffset.UTC; -import static org.neo4j.driver.Values.*; -import static org.neo4j.driver.internal.messaging.v2.MessageFormatV2.*; - -public class ValueUnpackerV5 extends ValueUnpackerV2 -{ - public ValueUnpackerV5( PackInput input ) - { - super( input ); - } - - protected Value unpack() throws IOException - { - //NOTE: blob support - Value blobValue = BoltClientBlobIO.unpackBlob(unpacker); - if (blobValue != null) - return blobValue; - - return super.unpack(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/ConnectionPoolMetricsListener.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/ConnectionPoolMetricsListener.java deleted file mode 100644 index 0cfc9f79..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/ConnectionPoolMetricsListener.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.metrics; - -public interface ConnectionPoolMetricsListener -{ - /** - * Invoked before a connection is creating. - */ - void beforeCreating( ListenerEvent listenerEvent ); - - /** - * Invoked after a connection is created successfully. - */ - void afterCreated( ListenerEvent listenerEvent ); - - /** - * Invoked after a connection is failed to create due to timeout, any kind of error. - */ - void afterFailedToCreate(); - - /** - * Invoked after a connection is closed. - */ - void afterClosed(); - - /** - * Invoked before acquiring or creating a connection. - * @param acquireEvent - */ - void beforeAcquiringOrCreating( ListenerEvent acquireEvent ); - - /** - * Invoked after a connection is being acquired or created regardless weather it is successful or not. - */ - void afterAcquiringOrCreating(); - - /** - * Invoked after a connection is acquired or created successfully. - * @param acquireEvent - */ - void afterAcquiredOrCreated( ListenerEvent acquireEvent ); - - /** - * Invoked after it is timed out to acquire or create a connection. - */ - void afterTimedOutToAcquireOrCreate(); - - /** - * After a connection is acquired from the pool. - * @param inUseEvent - */ - void acquired( ListenerEvent inUseEvent ); - - /** - * After a connection is released back to pool. - * @param inUseEvent - */ - void released( ListenerEvent inUseEvent ); -} - diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalAbstractMetrics.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalAbstractMetrics.java deleted file mode 100644 index 8d64cee8..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalAbstractMetrics.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.metrics; - -import java.util.Collections; -import java.util.Map; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.pool.ConnectionPoolImpl; -import org.neo4j.driver.ConnectionPoolMetrics; -import org.neo4j.driver.Metrics; - -public abstract class InternalAbstractMetrics implements Metrics, MetricsListener -{ - public static final InternalAbstractMetrics DEV_NULL_METRICS = new InternalAbstractMetrics() - { - @Override - public void beforeCreating( BoltServerAddress serverAddress, ListenerEvent creatingEvent ) - { - - } - - @Override - public void afterCreated( BoltServerAddress serverAddress, ListenerEvent creatingEvent ) - { - - } - - @Override - public void afterFailedToCreate( BoltServerAddress serverAddress ) - { - - } - - @Override - public void afterClosed( BoltServerAddress serverAddress ) - { - - } - - @Override - public void afterTimedOutToAcquireOrCreate( BoltServerAddress serverAddress ) - { - - } - - @Override - public void beforeAcquiringOrCreating( BoltServerAddress serverAddress, ListenerEvent acquireEvent ) - { - - } - - @Override - public void afterAcquiringOrCreating( BoltServerAddress serverAddress ) - { - - } - - @Override - public void afterAcquiredOrCreated( BoltServerAddress serverAddress, ListenerEvent acquireEvent ) - { - - } - - @Override - public void afterConnectionCreated( BoltServerAddress serverAddress, ListenerEvent inUseEvent ) - { - - } - - @Override - public void afterConnectionReleased( BoltServerAddress serverAddress, ListenerEvent inUseEvent ) - { - - } - - @Override - public ListenerEvent createListenerEvent() - { - return ListenerEvent.DEV_NULL_LISTENER_EVENT; - } - - @Override - public void putPoolMetrics( BoltServerAddress address, ConnectionPoolImpl connectionPool ) - { - - } - - @Override - public Map connectionPoolMetrics() - { - return Collections.emptyMap(); - } - - @Override - public Metrics snapshot() - { - return this; - } - - @Override - public String toString() - { - return "Driver metrics not available while driver metrics is not enabled."; - } - }; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalConnectionPoolMetrics.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalConnectionPoolMetrics.java deleted file mode 100644 index bfe0fc14..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalConnectionPoolMetrics.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.metrics; - -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.spi.ConnectionPool; -import org.neo4j.driver.ConnectionPoolMetrics; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.metrics.InternalMetrics.serverAddressToUniqueName; - -public class InternalConnectionPoolMetrics implements ConnectionPoolMetrics, ConnectionPoolMetricsListener -{ - private final BoltServerAddress address; - private final ConnectionPool pool; - - private final AtomicLong closed = new AtomicLong(); - - // creating = created + failedToCreate - private final AtomicInteger creating = new AtomicInteger(); - private final AtomicLong created = new AtomicLong(); - private final AtomicLong failedToCreate = new AtomicLong(); - - // acquiring = acquired + timedOutToAcquire + failedToAcquireDueToOtherFailures (which we do not keep track) - private final AtomicInteger acquiring = new AtomicInteger(); - private final AtomicLong acquired = new AtomicLong(); - private final AtomicLong timedOutToAcquire = new AtomicLong(); - - private final AtomicLong totalAcquisitionTime = new AtomicLong(); - private final AtomicLong totalConnectionTime = new AtomicLong(); - private final AtomicLong totalInUseTime = new AtomicLong(); - - private final AtomicLong totalInUseCount = new AtomicLong(); - - public InternalConnectionPoolMetrics( BoltServerAddress address, ConnectionPool pool ) - { - Objects.requireNonNull( address ); - Objects.requireNonNull( pool ); - - this.address = address; - this.pool = pool; - } - - @Override - public void beforeCreating( ListenerEvent connEvent ) - { - creating.incrementAndGet(); - connEvent.start(); - } - - @Override - public void afterFailedToCreate() - { - failedToCreate.incrementAndGet(); - creating.decrementAndGet(); - } - - @Override - public void afterCreated( ListenerEvent connEvent ) - { - created.incrementAndGet(); - creating.decrementAndGet(); - long elapsed = connEvent.elapsed(); - - totalConnectionTime.addAndGet( elapsed ); - } - - @Override - public void afterClosed() - { - closed.incrementAndGet(); - } - - @Override - public void beforeAcquiringOrCreating( ListenerEvent acquireEvent ) - { - acquireEvent.start(); - acquiring.incrementAndGet(); - } - - @Override - public void afterAcquiringOrCreating() - { - acquiring.decrementAndGet(); - } - - @Override - public void afterAcquiredOrCreated( ListenerEvent acquireEvent ) - { - acquired.incrementAndGet(); - long elapsed = acquireEvent.elapsed(); - - totalAcquisitionTime.addAndGet( elapsed ); - } - - @Override - public void afterTimedOutToAcquireOrCreate() - { - timedOutToAcquire.incrementAndGet(); - } - - @Override - public void acquired( ListenerEvent inUseEvent ) - { - inUseEvent.start(); - } - - @Override - public void released( ListenerEvent inUseEvent ) - { - totalInUseCount.incrementAndGet(); - long elapsed = inUseEvent.elapsed(); - - totalInUseTime.addAndGet( elapsed ); - } - - @Override - public String id() - { - return serverAddressToUniqueName( address ); - } - - @Override - public PoolStatus poolStatus() - { - if ( pool.isOpen( address ) ) - { - return PoolStatus.OPEN; - } - else - { - return PoolStatus.CLOSED; - } - } - - @Override - public int inUse() - { - return pool.inUseConnections( address ); - } - - @Override - public int idle() - { - return pool.idleConnections( address ); - } - - @Override - 
public int creating() - { - return creating.get(); - } - - @Override - public long created() - { - return created.get(); - } - - @Override - public long failedToCreate() - { - return failedToCreate.get(); - } - - @Override - public long timedOutToAcquire() - { - return timedOutToAcquire.get(); - } - - @Override - public long totalAcquisitionTime() - { - return totalAcquisitionTime.get(); - } - - @Override - public long totalConnectionTime() - { - return totalConnectionTime.get(); - } - - @Override - public long totalInUseTime() - { - return totalInUseTime.get(); - } - - @Override - public long totalInUseCount() - { - return totalInUseCount.get(); - } - - @Override - public ConnectionPoolMetrics snapshot() - { - return new SnapshotConnectionPoolMetrics( this ); - } - - @Override - public long closed() - { - return closed.get(); - } - - @Override - public int acquiring() - { - return acquiring.get(); - } - - @Override - public long acquired() - { - return this.acquired.get(); - } - - - @Override - public String toString() - { - return format( "[created=%s, closed=%s, creating=%s, failedToCreate=%s, acquiring=%s, acquired=%s, " + - "timedOutToAcquire=%s, inUse=%s, idle=%s, poolStatus=%s, " + - "totalAcquisitionTime=%s, totalConnectionTime=%s, totalInUseTime=%s, totalInUseCount=%s]", - created(), closed(), creating(), failedToCreate(), acquiring(), acquired(), - timedOutToAcquire(), inUse(), idle(), poolStatus(), - totalAcquisitionTime(), totalConnectionTime(), totalInUseTime(), totalInUseCount() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalMetrics.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalMetrics.java deleted file mode 100644 index f37985f1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalMetrics.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.metrics; - -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.pool.ConnectionPoolImpl; -import org.neo4j.driver.ConnectionPoolMetrics; -import org.neo4j.driver.Metrics; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.exceptions.ClientException; - -import static java.lang.String.format; -import static java.util.Collections.unmodifiableMap; - -public class InternalMetrics extends InternalAbstractMetrics -{ - private final Map connectionPoolMetrics; - private final Clock clock; - - public InternalMetrics( Clock clock ) - { - Objects.requireNonNull( clock ); - this.connectionPoolMetrics = new ConcurrentHashMap<>(); - this.clock = clock; - } - - @Override - public void putPoolMetrics( BoltServerAddress serverAddress, ConnectionPoolImpl pool ) - { - this.connectionPoolMetrics.put( serverAddressToUniqueName( serverAddress ), - new InternalConnectionPoolMetrics( serverAddress, pool ) ); - } - - @Override - public void beforeCreating( BoltServerAddress serverAddress, ListenerEvent creatingEvent ) - { - poolMetrics( serverAddress ).beforeCreating( creatingEvent ); - } - - @Override - public void afterCreated( BoltServerAddress serverAddress, ListenerEvent creatingEvent ) - { - poolMetrics( serverAddress ).afterCreated( creatingEvent ); - } - - @Override - public void afterFailedToCreate( BoltServerAddress serverAddress ) - { - poolMetrics( serverAddress ).afterFailedToCreate(); - } - - @Override - public void afterClosed( BoltServerAddress serverAddress ) - { - poolMetrics( serverAddress ).afterClosed(); - } - - @Override - public void beforeAcquiringOrCreating( BoltServerAddress serverAddress, ListenerEvent acquireEvent ) - { - poolMetrics( serverAddress ).beforeAcquiringOrCreating( acquireEvent ); - } - - @Override - public void afterAcquiringOrCreating( BoltServerAddress serverAddress ) - { - poolMetrics( serverAddress ).afterAcquiringOrCreating(); - } - - @Override - public void afterAcquiredOrCreated( BoltServerAddress serverAddress, ListenerEvent acquireEvent ) - { - poolMetrics( serverAddress ).afterAcquiredOrCreated( acquireEvent ); - } - - @Override - public void afterConnectionCreated( BoltServerAddress serverAddress, ListenerEvent inUseEvent ) - { - poolMetrics( serverAddress ).acquired( inUseEvent ); - } - - @Override - public void afterConnectionReleased( BoltServerAddress serverAddress, ListenerEvent inUseEvent ) - { - poolMetrics( serverAddress ).released( inUseEvent ); - } - - @Override - public void afterTimedOutToAcquireOrCreate( BoltServerAddress serverAddress ) - { - poolMetrics( serverAddress ).afterTimedOutToAcquireOrCreate(); - } - - @Override - public ListenerEvent createListenerEvent() - { - return new TimeRecorderListenerEvent( clock ); - } - - @Override - public Map connectionPoolMetrics() - { - return unmodifiableMap( this.connectionPoolMetrics ); - } - - @Override - public Metrics snapshot() - { - return new SnapshotMetrics( this ); - } - - @Override - public String toString() - { - return format( "PoolMetrics=%s", connectionPoolMetrics ); - } - - static String serverAddressToUniqueName( BoltServerAddress serverAddress ) - { - return serverAddress.toString(); - } - - private ConnectionPoolMetricsListener poolMetrics( BoltServerAddress serverAddress ) - { - InternalConnectionPoolMetrics poolMetrics = - (InternalConnectionPoolMetrics) this.connectionPoolMetrics.get( 
serverAddressToUniqueName( serverAddress ) ); - if ( poolMetrics == null ) - { - throw new ClientException( format( "Failed to find pool metrics for server `%s` in %s", serverAddress, this.connectionPoolMetrics ) ); - } - return poolMetrics; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalMetricsProvider.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalMetricsProvider.java deleted file mode 100644 index da88e080..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/InternalMetricsProvider.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.metrics; - -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.Metrics; - -public class InternalMetricsProvider implements MetricsProvider -{ - private final InternalMetrics metrics; - - public InternalMetricsProvider( Clock clock ) - { - this.metrics = new InternalMetrics( clock ); - } - - @Override - public Metrics metrics() - { - return metrics; - } - - @Override - public MetricsListener metricsListener() - { - return metrics; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/ListenerEvent.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/ListenerEvent.java deleted file mode 100644 index ba6bb993..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/ListenerEvent.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.metrics; - -public interface ListenerEvent -{ - public ListenerEvent DEV_NULL_LISTENER_EVENT = new ListenerEvent() - { - @Override - public void start() - { - } - - @Override - public long elapsed() - { - return 0; - } - }; - - void start(); - long elapsed(); -} - diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/MetricsListener.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/MetricsListener.java deleted file mode 100644 index 0d5e9288..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/MetricsListener.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.metrics; - -import java.util.concurrent.TimeUnit; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.async.connection.DirectConnection; -import org.neo4j.driver.internal.async.pool.ConnectionPoolImpl; -import org.neo4j.driver.Config; - -public interface MetricsListener -{ - /** - * Before creating a netty channel. - * @param serverAddress the server the netty channel binds to. - * @param creatingEvent a connection listener event registered when a connection is creating. - */ - void beforeCreating( BoltServerAddress serverAddress, ListenerEvent creatingEvent ); - - /** - * After a netty channel is created successfully. - * @param serverAddress the server the netty channel binds to. - */ - void afterCreated( BoltServerAddress serverAddress, ListenerEvent creatingEvent ); - - /** - * After a netty channel is created with a failure. - * @param serverAddress the server the netty channel binds to. - */ - void afterFailedToCreate( BoltServerAddress serverAddress ); - - /** - * After a netty channel is closed successfully. - * @param serverAddress the server the netty channel binds to. - */ - void afterClosed( BoltServerAddress serverAddress ); - - /** - * Before acquiring or creating a new netty channel from pool. - * @param serverAddress the server the netty channel binds to. - * @param acquireEvent a pool listener event registered in pool for this acquire event. - */ - void beforeAcquiringOrCreating( BoltServerAddress serverAddress, ListenerEvent acquireEvent ); - - /** - * After acquiring or creating a new netty channel from pool regardless it is successful or not. - * @param serverAddress the server the netty channel binds to. - */ - void afterAcquiringOrCreating( BoltServerAddress serverAddress ); - - /** - * After acquiring or creating a new netty channel from pool successfully. - * @param serverAddress the server the netty channel binds to. - * @param acquireEvent a pool listener event registered in pool for this acquire event. 
- */ - void afterAcquiredOrCreated( BoltServerAddress serverAddress, ListenerEvent acquireEvent ); - - /** - * After we failed to acquire a connection from pool within maximum connection acquisition timeout set by - * {@link Config.ConfigBuilder#withConnectionAcquisitionTimeout(long, TimeUnit)}. - * @param serverAddress - */ - void afterTimedOutToAcquireOrCreate( BoltServerAddress serverAddress ); - - /** - * After acquiring or creating a new netty channel from pool successfully. - * @param serverAddress the server the netty channel binds to. - * @param inUseEvent a connection listener registered with a {@link DirectConnection} when created. - */ - void afterConnectionCreated( BoltServerAddress serverAddress, ListenerEvent inUseEvent ); - - /** - * After releasing a netty channel back to pool successfully. - * @param serverAddress the server the netty channel binds to. - * @param inUseEvent a connection listener registered with a {@link DirectConnection} when destroyed. - */ - void afterConnectionReleased( BoltServerAddress serverAddress, ListenerEvent inUseEvent ); - - ListenerEvent createListenerEvent(); - - void putPoolMetrics( BoltServerAddress address, ConnectionPoolImpl connectionPool ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/MetricsProvider.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/MetricsProvider.java deleted file mode 100644 index 5b0a6cec..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/MetricsProvider.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.metrics; - -import org.neo4j.driver.Metrics; -import org.neo4j.driver.exceptions.ClientException; - -import static org.neo4j.driver.internal.metrics.InternalAbstractMetrics.DEV_NULL_METRICS; - -public interface MetricsProvider -{ - MetricsProvider METRICS_DISABLED_PROVIDER = new MetricsProvider() - { - @Override - public Metrics metrics() - { - // To outside users, we forbidden their access to the metrics API - throw new ClientException( "Driver metrics not enabled. To access driver metrics, " + - "you need to enabled driver metrics in the driver's configuration." ); - } - - @Override - public MetricsListener metricsListener() - { - // Internally we can still register callbacks to this empty metrics listener. 
- return DEV_NULL_METRICS; - } - }; - - Metrics metrics(); - - MetricsListener metricsListener(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/SnapshotConnectionPoolMetrics.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/SnapshotConnectionPoolMetrics.java deleted file mode 100644 index f7ceaff8..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/SnapshotConnectionPoolMetrics.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.metrics; - -import org.neo4j.driver.ConnectionPoolMetrics; - -import static java.lang.String.format; - -public class SnapshotConnectionPoolMetrics implements ConnectionPoolMetrics -{ - private final long acquired; - private final String id; - private final int acquiring; - private final PoolStatus poolStatus; - private final int idle; - private final int inUse; - private final int creating; - private final long created; - private final long failedToCreate; - private final long closed; - private final long timedOutToAcquire; - private final long totalAcquisitionTime; - private final long totalConnectionTime; - private final long totalInUseTime; - private final long totalInUseCount; - - public SnapshotConnectionPoolMetrics( ConnectionPoolMetrics other ) - { - id = other.id(); - acquired = other.acquired(); - acquiring = other.acquiring(); - poolStatus = other.poolStatus(); - idle = other.idle(); - inUse = other.inUse(); - creating = other.creating(); - created = other.created(); - failedToCreate = other.failedToCreate(); - closed = other.closed(); - timedOutToAcquire = other.timedOutToAcquire(); - totalAcquisitionTime = other.totalAcquisitionTime(); - totalConnectionTime = other.totalConnectionTime(); - totalInUseTime = other.totalInUseTime(); - totalInUseCount = other.totalInUseCount(); - } - - @Override - public String id() - { - return id; - } - - @Override - public PoolStatus poolStatus() - { - return poolStatus; - } - - @Override - public int inUse() - { - return inUse; - } - - @Override - public int idle() - { - return idle; - } - - @Override - public int creating() - { - return creating; - } - - @Override - public long created() - { - return created; - } - - @Override - public long failedToCreate() - { - return failedToCreate; - } - - @Override - public long closed() - { - return closed; - } - - @Override - public int acquiring() - { - return acquiring; - } - - @Override - public long acquired() - { - return acquired; - } - - @Override - public long timedOutToAcquire() - { - return timedOutToAcquire; - } - - @Override - public long totalAcquisitionTime() - { - return totalAcquisitionTime; - } - - @Override - public long totalConnectionTime() - { - return totalConnectionTime; - } - - @Override - public long totalInUseTime() - { - return totalInUseTime; - } - - @Override - public long totalInUseCount() - { - 
return totalInUseCount; - } - - @Override - public ConnectionPoolMetrics snapshot() - { - return this; - } - - @Override - public String toString() - { - return format( "[created=%s, closed=%s, creating=%s, failedToCreate=%s, acquiring=%s, acquired=%s, " + - "timedOutToAcquire=%s, inUse=%s, idle=%s, poolStatus=%s, " + - "totalAcquisitionTime=%s, totalConnectionTime=%s, totalInUseTime=%s, totalInUseCount=%s]", - created(), closed(), creating(), failedToCreate(), acquiring(), acquired(), - timedOutToAcquire(), inUse(), idle(), poolStatus(), - totalAcquisitionTime(), totalConnectionTime(), totalInUseTime(), totalInUseCount() ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/SnapshotMetrics.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/SnapshotMetrics.java deleted file mode 100644 index 681c124c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/SnapshotMetrics.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.metrics; - -import java.util.HashMap; -import java.util.Map; - -import org.neo4j.driver.ConnectionPoolMetrics; -import org.neo4j.driver.Metrics; - -public class SnapshotMetrics implements Metrics -{ - private final Map poolMetrics; - - public SnapshotMetrics( Metrics metrics ) - { - Map other = metrics.connectionPoolMetrics(); - poolMetrics = new HashMap<>( other.size() ); - - for ( String id : other.keySet() ) - { - poolMetrics.put( id, other.get( id ).snapshot() ); - } - } - - @Override - public Map connectionPoolMetrics() - { - return poolMetrics; - } - - @Override - public Metrics snapshot() - { - return this; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/TimeRecorderListenerEvent.java b/src/graiph-driver/java/org/neo4j/driver/internal/metrics/TimeRecorderListenerEvent.java deleted file mode 100644 index 0f923e5e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/metrics/TimeRecorderListenerEvent.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.metrics; - -import org.neo4j.driver.internal.util.Clock; - -public class TimeRecorderListenerEvent implements ListenerEvent -{ - private long startTime; - private final Clock clock; - - public TimeRecorderListenerEvent( Clock clock ) - { - this.clock = clock; - } - - @Override - public void start() - { - startTime = clock.millis(); - } - - @Override - public long elapsed() - { - return clock.millis() - startTime; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/ByteArrayIncompatiblePacker.java b/src/graiph-driver/java/org/neo4j/driver/internal/packstream/ByteArrayIncompatiblePacker.java deleted file mode 100644 index 9f14ced4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/ByteArrayIncompatiblePacker.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.packstream; - -import java.io.IOException; - -public class ByteArrayIncompatiblePacker extends PackStream.Packer -{ - public ByteArrayIncompatiblePacker( PackOutput out ) - { - super( out ); - } - - @Override - public void packBytesHeader( int size ) throws IOException - { - throw new PackStream.UnPackable( "Packing bytes is not supported " + - "as the current server this driver connected to does not support unpack bytes." ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackInput.java b/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackInput.java deleted file mode 100644 index 05c1440c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackInput.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.packstream; - -import java.io.IOException; - -/** - * This is what {@link PackStream} uses to ingest data, implement this on top of any data source of your choice to - * deserialize the stream with {@link PackStream}. 
- */ -public interface PackInput -{ - /** Consume one byte */ - byte readByte() throws IOException; - - /** Consume a 2-byte signed integer */ - short readShort() throws IOException; - - /** Consume a 4-byte signed integer */ - int readInt() throws IOException; - - /** Consume an 8-byte signed integer */ - long readLong() throws IOException; - - /** Consume an 8-byte IEEE 754 "double format" floating-point number */ - double readDouble() throws IOException; - - /** Consume a specified number of bytes */ - void readBytes( byte[] into, int offset, int toRead ) throws IOException; - - /** Get the next byte without forwarding the internal pointer */ - byte peekByte() throws IOException; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackOutput.java b/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackOutput.java deleted file mode 100644 index 66f68c18..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackOutput.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.packstream; - -import java.io.IOException; - -/** - * This is where {@link PackStream} writes its output to. - */ -public interface PackOutput -{ - /** Produce a single byte */ - PackOutput writeByte( byte value ) throws IOException; - - /** Produce binary data */ - PackOutput writeBytes( byte[] data ) throws IOException; - - /** Produce a 4-byte signed integer */ - PackOutput writeShort( short value ) throws IOException; - - /** Produce a 4-byte signed integer */ - PackOutput writeInt( int value ) throws IOException; - - /** Produce an 8-byte signed integer */ - PackOutput writeLong( long value ) throws IOException; - - /** Produce an 8-byte IEEE 754 "double format" floating-point number */ - PackOutput writeDouble( double value ) throws IOException; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackStream.java b/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackStream.java deleted file mode 100644 index 337d40f1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackStream.java +++ /dev/null @@ -1,699 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
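PackInput and PackOutput, removed above, define the byte-level contract that PackStream reads from and writes to, and the javadoc invites implementing them over any data source. A minimal, illustrative reader backed by a heap ByteBuffer — not the driver's own implementation, and without the IOException declarations of the real interface:

```java
import java.nio.ByteBuffer;

// Minimal PackInput-style reader over a ByteBuffer; mirrors the deleted interface's contract.
final class ByteBufferPackInput {
    private final ByteBuffer buf;

    ByteBufferPackInput(byte[] data) {
        this.buf = ByteBuffer.wrap(data); // big-endian by default, matching network order
    }

    byte readByte()     { return buf.get(); }
    short readShort()   { return buf.getShort(); }
    int readInt()       { return buf.getInt(); }
    long readLong()     { return buf.getLong(); }
    double readDouble() { return buf.getDouble(); }

    void readBytes(byte[] into, int offset, int toRead) { buf.get(into, offset, toRead); }

    // Look at the next byte without advancing the position.
    byte peekByte()     { return buf.get(buf.position()); }

    public static void main(String[] args) {
        ByteBufferPackInput in = new ByteBufferPackInput(new byte[]{(byte) 0xC9, 0x03, (byte) 0xE8});
        System.out.println(in.readByte() & 0xFF); // 201 (0xC9, the INT_16 marker)
        System.out.println(in.readShort());       // 1000
    }
}
```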
- */ -package org.neo4j.driver.internal.packstream; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.List; -import java.util.Map; - -import static java.lang.Integer.toHexString; -import static java.lang.String.format; -import static java.util.Collections.singletonList; - -/** - * PackStream is a messaging serialisation format heavily inspired by MessagePack. - * The key differences are in the type system itself which (among other things) replaces extensions with structures. - * The Packer and Unpacker implementations are also faster than their MessagePack counterparts. - * - * Note that several marker byte values are RESERVED for future use. - * Extra markers should not be added casually and such additions must be follow a strict process involving both client and server software. - * - * The table below shows all allocated marker byte values. - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
- * Marker   | Binary   | Type        | Description
- * 00..7F   | 0xxxxxxx | +TINY_INT   | Integer 0 to 127
- * 80..8F   | 1000xxxx | TINY_STRING |
- * 90..9F   | 1001xxxx | TINY_LIST   |
- * A0..AF   | 1010xxxx | TINY_MAP    |
- * B0..BF   | 1011xxxx | TINY_STRUCT |
- * C0       | 11000000 | NULL        |
- * C1       | 11000001 | FLOAT_64    | 64-bit floating point number (double)
- * C2       | 11000010 | FALSE       | Boolean false
- * C3       | 11000011 | TRUE        | Boolean true
- * C4..C7   | 110001xx | RESERVED    |
- * C8       | 11001000 | INT_8       | 8-bit signed integer
- * C9       | 11001001 | INT_16      | 16-bit signed integer
- * CA       | 11001010 | INT_32      | 32-bit signed integer
- * CB       | 11001011 | INT_64      | 64-bit signed integer
- * CC       | 11001100 | BYTES_8     | Byte string (fewer than 2^8 bytes)
- * CD       | 11001101 | BYTES_16    | Byte string (fewer than 2^16 bytes)
- * CE       | 11001110 | BYTES_32    | Byte string (fewer than 2^32 bytes)
- * CF       | 11001111 | RESERVED    |
- * D0       | 11010000 | STRING_8    | UTF-8 encoded string (fewer than 2^8 bytes)
- * D1       | 11010001 | STRING_16   | UTF-8 encoded string (fewer than 2^16 bytes)
- * D2       | 11010010 | STRING_32   | UTF-8 encoded string (fewer than 2^32 bytes)
- * D3       | 11010011 | RESERVED    |
- * D4       | 11010100 | LIST_8      | List (fewer than 2^8 items)
- * D5       | 11010101 | LIST_16     | List (fewer than 2^16 items)
- * D6       | 11010110 | LIST_32     | List (fewer than 2^32 items)
- * D7       | 11010111 | RESERVED    |
- * D8       | 11011000 | MAP_8       | Map (fewer than 2^8 key:value pairs)
- * D9       | 11011001 | MAP_16      | Map (fewer than 2^16 key:value pairs)
- * DA       | 11011010 | MAP_32      | Map (fewer than 2^32 key:value pairs)
- * DB       | 11011011 | RESERVED    |
- * DC       | 11011100 | STRUCT_8    | Structure (fewer than 2^8 fields)
- * DD       | 11011101 | STRUCT_16   | Structure (fewer than 2^16 fields)
- * DE       | 11011110 | RESERVED    |
- * DF       | 11011111 | RESERVED    |
- * E0..EF   | 1110xxxx | RESERVED    |
- * F0..FF   | 1111xxxx | -TINY_INT   | Integer -1 to -16
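Reading the table: the TINY_* markers carry their size (or, for TINY_INT, the value itself) in the marker byte, while every other type is a marker byte followed by a fixed-width big-endian payload. A simplified, illustrative encoder for the integer rows only — the real Packer below covers all ranges:

```java
import java.io.ByteArrayOutputStream;

// Illustrative PackStream integer encoding, following the marker table above.
final class TinyIntExample {
    static byte[] packLong(long value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        if (value >= -16 && value < 128) {
            out.write((byte) value);                    // TINY_INT: the value is its own marker
        } else if (value >= -128 && value < -16) {
            out.write(0xC8); out.write((byte) value);   // INT_8 marker + 1-byte payload
        } else if (value >= -32768 && value < 32768) {
            out.write(0xC9);                            // INT_16 marker + 2-byte payload
            out.write((byte) (value >> 8)); out.write((byte) value);
        } else {
            // INT_32 / INT_64 follow the same pattern with wider payloads.
            throw new UnsupportedOperationException("sketch only handles small values");
        }
        return out.toByteArray();
    }

    public static void main(String[] args) {
        System.out.printf("42   -> %02X%n", packLong(42)[0]);       // 2A (TINY_INT)
        System.out.printf("1000 -> %02X ...%n", packLong(1000)[0]); // C9 (INT_16)
    }
}
```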
- * - */ -public class PackStream -{ - - public static final byte TINY_STRING = (byte) 0x80; - public static final byte TINY_LIST = (byte) 0x90; - public static final byte TINY_MAP = (byte) 0xA0; - public static final byte TINY_STRUCT = (byte) 0xB0; - public static final byte NULL = (byte) 0xC0; - public static final byte FLOAT_64 = (byte) 0xC1; - public static final byte FALSE = (byte) 0xC2; - public static final byte TRUE = (byte) 0xC3; - public static final byte RESERVED_C4 = (byte) 0xC4; - public static final byte RESERVED_C5 = (byte) 0xC5; - public static final byte RESERVED_C6 = (byte) 0xC6; - public static final byte RESERVED_C7 = (byte) 0xC7; - public static final byte INT_8 = (byte) 0xC8; - public static final byte INT_16 = (byte) 0xC9; - public static final byte INT_32 = (byte) 0xCA; - public static final byte INT_64 = (byte) 0xCB; - public static final byte BYTES_8 = (byte) 0xCC; - public static final byte BYTES_16 = (byte) 0xCD; - public static final byte BYTES_32 = (byte) 0xCE; - public static final byte RESERVED_CF = (byte) 0xCF; - public static final byte STRING_8 = (byte) 0xD0; - public static final byte STRING_16 = (byte) 0xD1; - public static final byte STRING_32 = (byte) 0xD2; - public static final byte RESERVED_D3 = (byte) 0xD3; - public static final byte LIST_8 = (byte) 0xD4; - public static final byte LIST_16 = (byte) 0xD5; - public static final byte LIST_32 = (byte) 0xD6; - public static final byte RESERVED_D7 = (byte) 0xD7; - public static final byte MAP_8 = (byte) 0xD8; - public static final byte MAP_16 = (byte) 0xD9; - public static final byte MAP_32 = (byte) 0xDA; - public static final byte RESERVED_DB = (byte) 0xDB; - public static final byte STRUCT_8 = (byte) 0xDC; - public static final byte STRUCT_16 = (byte) 0xDD; - public static final byte RESERVED_DE = (byte) 0xDE; - public static final byte RESERVED_DF = (byte) 0xDF; - public static final byte RESERVED_E0 = (byte) 0xE0; - public static final byte RESERVED_E1 = (byte) 0xE1; - public static final byte RESERVED_E2 = (byte) 0xE2; - public static final byte RESERVED_E3 = (byte) 0xE3; - public static final byte RESERVED_E4 = (byte) 0xE4; - public static final byte RESERVED_E5 = (byte) 0xE5; - public static final byte RESERVED_E6 = (byte) 0xE6; - public static final byte RESERVED_E7 = (byte) 0xE7; - public static final byte RESERVED_E8 = (byte) 0xE8; - public static final byte RESERVED_E9 = (byte) 0xE9; - public static final byte RESERVED_EA = (byte) 0xEA; - public static final byte RESERVED_EB = (byte) 0xEB; - public static final byte RESERVED_EC = (byte) 0xEC; - public static final byte RESERVED_ED = (byte) 0xED; - public static final byte RESERVED_EE = (byte) 0xEE; - public static final byte RESERVED_EF = (byte) 0xEF; - - private static final long PLUS_2_TO_THE_31 = 2147483648L; - private static final long PLUS_2_TO_THE_16 = 65536L; - private static final long PLUS_2_TO_THE_15 = 32768L; - private static final long PLUS_2_TO_THE_7 = 128L; - private static final long MINUS_2_TO_THE_4 = -16L; - private static final long MINUS_2_TO_THE_7 = -128L; - private static final long MINUS_2_TO_THE_15 = -32768L; - private static final long MINUS_2_TO_THE_31 = -2147483648L; - - private static final String EMPTY_STRING = ""; - private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; - private static final Charset UTF_8 = Charset.forName( "UTF-8" ); - - private PackStream() {} - - public static class Packer - { - private PackOutput out; - - public Packer( PackOutput out ) - { - this.out = out; - } - - private void packRaw( 
byte[] data ) throws IOException - { - out.writeBytes( data ); - } - - public void packNull() throws IOException - { - out.writeByte( NULL ); - } - - public void pack( boolean value ) throws IOException - { - out.writeByte( value ? TRUE : FALSE ); - } - - public void pack( long value ) throws IOException - { - if ( value >= MINUS_2_TO_THE_4 && value < PLUS_2_TO_THE_7) - { - out.writeByte( (byte) value ); - } - else if ( value >= MINUS_2_TO_THE_7 && value < MINUS_2_TO_THE_4 ) - { - out.writeByte( INT_8 ) - .writeByte( (byte) value ); - } - else if ( value >= MINUS_2_TO_THE_15 && value < PLUS_2_TO_THE_15 ) - { - out.writeByte( INT_16 ) - .writeShort( (short) value ); - } - else if ( value >= MINUS_2_TO_THE_31 && value < PLUS_2_TO_THE_31 ) - { - out.writeByte( INT_32 ) - .writeInt( (int) value ); - } - else - { - out.writeByte( INT_64 ) - .writeLong( value ); - } - } - - public void pack( double value ) throws IOException - { - out.writeByte( FLOAT_64 ) - .writeDouble( value ); - } - - public void pack( byte[] values ) throws IOException - { - if ( values == null ) { packNull(); } - else - { - packBytesHeader( values.length ); - packRaw( values ); - } - } - - public void pack( String value ) throws IOException - { - if ( value == null ) { packNull(); } - else - { - byte[] utf8 = value.getBytes( UTF_8 ); - packStringHeader( utf8.length ); - packRaw( utf8 ); - } - } - - private void pack( List values ) throws IOException - { - if ( values == null ) { packNull(); } - else - { - packListHeader( values.size() ); - for ( Object value : values ) - { - pack( value ); - } - } - } - - private void pack( Map values ) throws IOException - { - if ( values == null ) { packNull(); } - else - { - packMapHeader( values.size() ); - for ( Object key : values.keySet() ) - { - pack( key ); - pack( values.get( key ) ); - } - } - } - - public void pack( Object value ) throws IOException - { - if ( value == null ) { packNull(); } - else if ( value instanceof Boolean ) { pack( (boolean) value ); } - else if ( value instanceof boolean[] ) { pack( singletonList( value ) ); } - else if ( value instanceof Byte ) { pack( (byte) value ); } - else if ( value instanceof byte[] ) { pack( (byte[]) value ); } - else if ( value instanceof Short ) { pack( (short) value ); } - else if ( value instanceof short[] ) { pack( singletonList( value ) ); } - else if ( value instanceof Integer ) { pack( (int) value ); } - else if ( value instanceof int[] ) { pack( singletonList( value ) ); } - else if ( value instanceof Long ) { pack( (long) value ); } - else if ( value instanceof long[] ) { pack( singletonList( value ) ); } - else if ( value instanceof Float ) { pack( (float) value ); } - else if ( value instanceof float[] ) { pack( singletonList( value ) ); } - else if ( value instanceof Double ) { pack( (double) value ); } - else if ( value instanceof double[] ) { pack( singletonList( value ) ); } - else if ( value instanceof Character ) { pack( Character.toString( (char) value ) ); } - else if ( value instanceof char[] ) { pack( new String( (char[]) value ) ); } - else if ( value instanceof String ) { pack( (String) value ); } - else if ( value instanceof String[] ) { pack( singletonList( value ) ); } - else if ( value instanceof List ) { pack( (List) value ); } - else if ( value instanceof Map ) { pack( (Map) value ); } - else { throw new UnPackable( format( "Cannot pack object %s", value ) );} - } - - public void packBytesHeader( int size ) throws IOException - { - if ( size <= Byte.MAX_VALUE ) - { - out.writeByte( BYTES_8 ) - 
.writeByte( (byte) size ); - } - else if ( size < PLUS_2_TO_THE_16 ) - { - out.writeByte( BYTES_16 ) - .writeShort( (short) size ); - } - else - { - out.writeByte( BYTES_32 ) - .writeInt( size ); - } - } - - private void packStringHeader( int size ) throws IOException - { - if ( size < 0x10 ) - { - out.writeByte( (byte) (TINY_STRING | size) ); - } - else if ( size <= Byte.MAX_VALUE ) - { - out.writeByte( STRING_8 ) - .writeByte( (byte) size ); - } - else if ( size < PLUS_2_TO_THE_16 ) - { - out.writeByte( STRING_16 ) - .writeShort( (short) size ); - } - else - { - out.writeByte( STRING_32 ) - .writeInt( size ); - } - } - - public void packListHeader( int size ) throws IOException - { - if ( size < 0x10 ) - { - out.writeByte( (byte) (TINY_LIST | size) ); - } - else if ( size <= Byte.MAX_VALUE ) - { - out.writeByte( LIST_8 ) - .writeByte( (byte) size ); - } - else if ( size < PLUS_2_TO_THE_16 ) - { - out.writeByte( LIST_16 ) - .writeShort( (short) size ); - } - else - { - out.writeByte( LIST_32 ) - .writeInt( size ); - } - } - - public void packMapHeader( int size ) throws IOException - { - if ( size < 0x10 ) - { - out.writeByte( (byte) (TINY_MAP | size) ); - } - else if ( size <= Byte.MAX_VALUE ) - { - out.writeByte( MAP_8 ) - .writeByte( (byte) size ); - } - else if ( size < PLUS_2_TO_THE_16 ) - { - out.writeByte( MAP_16 ) - .writeShort( (short) size ); - } - else - { - out.writeByte( MAP_32 ) - .writeInt( size ); - } - } - - public void packStructHeader( int size, byte signature ) throws IOException - { - if ( size < 0x10 ) - { - out.writeByte( (byte) (TINY_STRUCT | size) ) - .writeByte( signature ); - } - else if ( size <= Byte.MAX_VALUE ) - { - out.writeByte( STRUCT_8 ) - .writeByte( (byte) size ) - .writeByte( signature ); - } - else if ( size < PLUS_2_TO_THE_16 ) - { - out.writeByte( STRUCT_16 ) - .writeShort( (short) size ) - .writeByte( signature ); - } - else - { - throw new Overflow( - "Structures cannot have more than " + (PLUS_2_TO_THE_16 - 1) + " fields" ); - } - } - - } - - public static class Unpacker - { - private PackInput in; - - public Unpacker( PackInput in ) - { - this.in = in; - } - - public long unpackStructHeader() throws IOException - { - final byte markerByte = in.readByte(); - final byte markerHighNibble = (byte) (markerByte & 0xF0); - final byte markerLowNibble = (byte) (markerByte & 0x0F); - - if ( markerHighNibble == TINY_STRUCT ) { return markerLowNibble; } - switch(markerByte) - { - case STRUCT_8: return unpackUINT8(); - case STRUCT_16: return unpackUINT16(); - default: throw new Unexpected( "Expected a struct, but got: " + toHexString( markerByte )); - } - } - - public byte unpackStructSignature() throws IOException - { - return in.readByte(); - } - - public long unpackListHeader() throws IOException - { - final byte markerByte = in.readByte(); - final byte markerHighNibble = (byte) (markerByte & 0xF0); - final byte markerLowNibble = (byte) (markerByte & 0x0F); - - if ( markerHighNibble == TINY_LIST ) { return markerLowNibble; } - switch(markerByte) - { - case LIST_8: return unpackUINT8(); - case LIST_16: return unpackUINT16(); - case LIST_32: return unpackUINT32(); - default: throw new Unexpected( "Expected a list, but got: " + toHexString( markerByte & 0xFF )); - } - } - - public long unpackMapHeader() throws IOException - { - final byte markerByte = in.readByte(); - final byte markerHighNibble = (byte) (markerByte & 0xF0); - final byte markerLowNibble = (byte) (markerByte & 0x0F); - - if ( markerHighNibble == TINY_MAP ) { return markerLowNibble; } - 
switch(markerByte) - { - case MAP_8: return unpackUINT8(); - case MAP_16: return unpackUINT16(); - case MAP_32: return unpackUINT32(); - default: throw new Unexpected( "Expected a map, but got: " + toHexString( markerByte )); - } - } - - public long unpackLong() throws IOException - { - final byte markerByte = in.readByte(); - if ( markerByte >= MINUS_2_TO_THE_4) { return markerByte; } - switch(markerByte) - { - case INT_8: return in.readByte(); - case INT_16: return in.readShort(); - case INT_32: return in.readInt(); - case INT_64: return in.readLong(); - default: throw new Unexpected( "Expected an integer, but got: " + toHexString( markerByte )); - } - } - - public double unpackDouble() throws IOException - { - final byte markerByte = in.readByte(); - if(markerByte == FLOAT_64) - { - return in.readDouble(); - } - throw new Unexpected( "Expected a double, but got: " + toHexString( markerByte )); - } - - public byte[] unpackBytes() throws IOException - { - final byte markerByte = in.readByte(); - switch(markerByte) - { - case BYTES_8: return unpackRawBytes( unpackUINT8() ); - case BYTES_16: return unpackRawBytes( unpackUINT16() ); - case BYTES_32: - { - long size = unpackUINT32(); - if ( size <= Integer.MAX_VALUE ) - { - return unpackRawBytes( (int) size ); - } - else - { - throw new Overflow( "BYTES_32 too long for Java" ); - } - } - default: throw new Unexpected( "Expected bytes, but got: 0x" + toHexString( markerByte & 0xFF )); - } - } - - public String unpackString() throws IOException - { - final byte markerByte = in.readByte(); - if( markerByte == TINY_STRING ) // Note no mask, so we compare to 0x80. - { - return EMPTY_STRING; - } - - return new String(unpackUtf8(markerByte), UTF_8); - } - - /** - * This may seem confusing. This method exists to move forward the internal pointer when encountering - * a null value. The idiomatic usage would be someone using {@link #peekNextType()} to detect a null type, - * and then this method to "skip past it". 
- * @return null - * @throws IOException if the unpacked value was not null - */ - public Object unpackNull() throws IOException - { - final byte markerByte = in.readByte(); - if ( markerByte != NULL ) - { - throw new Unexpected( "Expected a null, but got: 0x" + toHexString( markerByte & 0xFF ) ); - } - return null; - } - - private byte[] unpackUtf8(byte markerByte) throws IOException - { - final byte markerHighNibble = (byte) (markerByte & 0xF0); - final byte markerLowNibble = (byte) (markerByte & 0x0F); - - if ( markerHighNibble == TINY_STRING ) { return unpackRawBytes( markerLowNibble ); } - switch(markerByte) - { - case STRING_8: return unpackRawBytes( unpackUINT8() ); - case STRING_16: return unpackRawBytes( unpackUINT16() ); - case STRING_32: - { - long size = unpackUINT32(); - if ( size <= Integer.MAX_VALUE ) - { - return unpackRawBytes( (int) size ); - } - else - { - throw new Overflow( "STRING_32 too long for Java" ); - } - } - default: throw new Unexpected( "Expected a string, but got: 0x" + toHexString( markerByte & 0xFF )); - } - } - - public boolean unpackBoolean() throws IOException - { - final byte markerByte = in.readByte(); - switch ( markerByte ) - { - case TRUE: - return true; - case FALSE: - return false; - default: - throw new Unexpected( "Expected a boolean, but got: 0x" + toHexString( markerByte & 0xFF ) ); - } - } - - private int unpackUINT8() throws IOException - { - return in.readByte() & 0xFF; - } - - private int unpackUINT16() throws IOException - { - return in.readShort() & 0xFFFF; - } - - private long unpackUINT32() throws IOException - { - return in.readInt() & 0xFFFFFFFFL; - } - - private byte[] unpackRawBytes(int size ) throws IOException - { - if ( size == 0 ) - { - return EMPTY_BYTE_ARRAY; - } - byte[] heapBuffer = new byte[size]; - in.readBytes( heapBuffer, 0, heapBuffer.length ); - return heapBuffer; - } - - public PackType peekNextType() throws IOException - { - final byte markerByte = in.peekByte(); - final byte markerHighNibble = (byte) (markerByte & 0xF0); - - switch(markerHighNibble) - { - case TINY_STRING: return PackType.STRING; - case TINY_LIST: return PackType.LIST; - case TINY_MAP: return PackType.MAP; - case TINY_STRUCT: return PackType.STRUCT; - } - - switch(markerByte) - { - case NULL: - return PackType.NULL; - case TRUE: - case FALSE: - return PackType.BOOLEAN; - case FLOAT_64: - return PackType.FLOAT; - case BYTES_8: - case BYTES_16: - case BYTES_32: - return PackType.BYTES; - case STRING_8: - case STRING_16: - case STRING_32: - return PackType.STRING; - case LIST_8: - case LIST_16: - case LIST_32: - return PackType.LIST; - case MAP_8: - case MAP_16: - case MAP_32: - return PackType.MAP; - case STRUCT_8: - case STRUCT_16: - return PackType.STRUCT; - default: - return PackType.INTEGER; - } - } - } - - public static class PackStreamException extends IOException - { - private static final long serialVersionUID = -1491422133282345421L; - - protected PackStreamException( String message ) - { - super( message ); - } - } - - public static class EndOfStream extends PackStreamException - { - private static final long serialVersionUID = 5102836237108105603L; - - public EndOfStream( String message ) - { - super( message ); - } - } - - public static class Overflow extends PackStreamException - { - private static final long serialVersionUID = -923071934446993659L; - - public Overflow( String message ) - { - super( message ); - } - } - - public static class Unexpected extends PackStreamException - { - private static final long serialVersionUID = 
5004685868740125469L; - - public Unexpected( String message ) - { - super( message ); - } - } - - public static class UnPackable extends PackStreamException - { - private static final long serialVersionUID = 2408740707769711365L; - - public UnPackable( String message ) - { - super( message ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackType.java b/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackType.java deleted file mode 100644 index 07cfef9e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/packstream/PackType.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.packstream; - -public enum PackType -{ - NULL, BOOLEAN, INTEGER, FLOAT, BYTES, - STRING, LIST, MAP, STRUCT -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/AbstractRxStatementRunner.java b/src/graiph-driver/java/org/neo4j/driver/internal/reactive/AbstractRxStatementRunner.java deleted file mode 100644 index 59f2f6bf..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/AbstractRxStatementRunner.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.reactive; - -import java.util.Map; - -import org.neo4j.driver.reactive.RxResult; -import org.neo4j.driver.reactive.RxStatementRunner; -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; - -import static org.neo4j.driver.internal.AbstractStatementRunner.parameters; - -public abstract class AbstractRxStatementRunner implements RxStatementRunner -{ - @Override - public final RxResult run( String statementTemplate, Value parameters ) - { - return run( new Statement( statementTemplate, parameters ) ); - } - - @Override - public final RxResult run( String statementTemplate, Map statementParameters ) - { - return run( statementTemplate, parameters( statementParameters ) ); - } - - @Override - public final RxResult run( String statementTemplate, Record statementParameters ) - { - return run( statementTemplate, parameters( statementParameters ) ); - } - - @Override - public final RxResult run( String statementTemplate ) - { - return run( new Statement( statementTemplate ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxResult.java b/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxResult.java deleted file mode 100644 index d24fa9b4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxResult.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.reactive; - -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - -import java.util.concurrent.CompletionStage; -import java.util.function.Supplier; - -import org.neo4j.driver.Record; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.reactive.RxResult; -import org.neo4j.driver.internal.cursor.RxStatementResultCursor; -import org.neo4j.driver.summary.ResultSummary; - -public class InternalRxResult implements RxResult -{ - private Supplier> cursorFutureSupplier; - private volatile CompletionStage cursorFuture; - - public InternalRxResult( Supplier> cursorFuture ) - { - this.cursorFutureSupplier = cursorFuture; - } - - @Override - public Publisher keys() - { - return Flux.defer( () -> Mono.fromCompletionStage( getCursorFuture() ) - .flatMapIterable( RxStatementResultCursor::keys ).onErrorMap( Futures::completionExceptionCause ) ); - } - - @Override - public Publisher records() - { - return Flux.create( sink -> getCursorFuture().whenComplete( ( cursor, completionError ) -> { - if( cursor != null ) - { - if( cursor.isDone() ) - { - sink.complete(); - } - else - { - cursor.installRecordConsumer( ( r, e ) -> { - if ( r != null ) - { - sink.next( r ); - } - else if ( e != null ) - { - sink.error( e ); - } - else - { - sink.complete(); - } - } ); - sink.onCancel( cursor::cancel ); - sink.onRequest( cursor::request ); - } - } - else - { - Throwable error = Futures.completionExceptionCause( completionError ); - sink.error( error ); - } - } ) ); - } - - private CompletionStage getCursorFuture() - { - if ( cursorFuture != null ) - { - return cursorFuture; - } - return initCursorFuture(); - } - - synchronized CompletionStage initCursorFuture() - { - // A quick path to return - if ( cursorFuture != null ) - { - return cursorFuture; - } - - // now we obtained lock and we are going to be the one who assigns cursorFuture one and only once. - cursorFuture = cursorFutureSupplier.get(); - cursorFutureSupplier = null; // we no longer need the reference to this object - return this.cursorFuture; - } - - @Override - public Publisher summary() - { - return Mono.create( sink -> getCursorFuture().whenComplete( ( cursor, completionError ) -> { - if ( cursor != null ) - { - cursor.summaryAsync().whenComplete( ( summary, summaryCompletionError ) -> { - Throwable error = Futures.completionExceptionCause( summaryCompletionError ); - if ( summary != null ) - { - sink.success( summary ); - } - else - { - sink.error( error ); - } - } ); - } - else - { - Throwable error = Futures.completionExceptionCause( completionError ); - sink.error( error ); - } - } ) ); - } - - // For testing purpose - Supplier> cursorFutureSupplier() - { - return this.cursorFutureSupplier; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxSession.java b/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxSession.java deleted file mode 100644 index fb1fdb46..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxSession.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
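InternalRxResult.records(), removed above, adapts the cursor's callback-style record consumer into a Flux via Flux.create, wiring downstream cancellation back to the cursor. The same bridging pattern in isolation, with a hypothetical callback source in place of the driver's cursor (assumes Reactor on the classpath):

```java
import reactor.core.publisher.Flux;

import java.util.function.Consumer;

// Hypothetical callback-based source, standing in for the driver's record cursor.
interface CallbackSource<T> {
    void onItem(Consumer<T> consumer); // invoked once per item, then with null when done
    void cancel();
}

final class CallbackToFlux {
    // Bridge the callback source into a Flux, as records() does for the result cursor.
    static <T> Flux<T> asFlux(CallbackSource<T> source) {
        return Flux.create(sink -> {
            source.onItem(item -> {
                if (item != null) {
                    sink.next(item);       // push each item downstream
                } else {
                    sink.complete();       // null signals end of stream in this sketch
                }
            });
            sink.onCancel(source::cancel); // propagate downstream cancellation
        });
    }
}
```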
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.reactive; - -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.internal.async.NetworkSession; -import org.neo4j.driver.internal.cursor.RxStatementResultCursor; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.reactive.RxResult; -import org.neo4j.driver.reactive.RxSession; -import org.neo4j.driver.reactive.RxTransaction; -import org.neo4j.driver.reactive.RxTransactionWork; - -import static org.neo4j.driver.internal.reactive.RxUtils.createEmptyPublisher; -import static org.neo4j.driver.internal.reactive.RxUtils.createMono; - -public class InternalRxSession extends AbstractRxStatementRunner implements RxSession -{ - private final NetworkSession session; - - public InternalRxSession( NetworkSession session ) - { - // RxSession accept a network session as input. - // The network session different from async session that it provides ways to both run for Rx and Async - // Note: Blocking result could just build on top of async result. However Rx result cannot just build on top of async result. - this.session = session; - } - - @Override - public Publisher beginTransaction() - { - return beginTransaction( TransactionConfig.empty() ); - } - - @Override - public Publisher beginTransaction( TransactionConfig config ) - { - return createMono( () -> - { - CompletableFuture txFuture = new CompletableFuture<>(); - session.beginTransactionAsync( config ).whenComplete( ( tx, completionError ) -> { - if ( tx != null ) - { - txFuture.complete( new InternalRxTransaction( tx ) ); - } - else - { - releaseConnectionBeforeReturning( txFuture, completionError ); - } - } ); - return txFuture; - } ); - } - - private Publisher beginTransaction( AccessMode mode, TransactionConfig config ) - { - return createMono( () -> - { - CompletableFuture txFuture = new CompletableFuture<>(); - session.beginTransactionAsync( mode, config ).whenComplete( ( tx, completionError ) -> { - if ( tx != null ) - { - txFuture.complete( new InternalRxTransaction( tx ) ); - } - else - { - releaseConnectionBeforeReturning( txFuture, completionError ); - } - } ); - return txFuture; - } ); - } - - @Override - public Publisher readTransaction( RxTransactionWork> work ) - { - return readTransaction( work, TransactionConfig.empty() ); - } - - @Override - public Publisher readTransaction( RxTransactionWork> work, TransactionConfig config ) - { - return runTransaction( AccessMode.READ, work, config ); - } - - @Override - public Publisher writeTransaction( RxTransactionWork> work ) - { - return writeTransaction( work, TransactionConfig.empty() ); - } - - @Override - public Publisher writeTransaction( RxTransactionWork> work, TransactionConfig config ) - { - return runTransaction( AccessMode.WRITE, work, config ); - } - - private Publisher runTransaction( AccessMode mode, RxTransactionWork> work, TransactionConfig config ) - { - Flux 
repeatableWork = Flux.usingWhen( beginTransaction( mode, config ), work::execute, RxTransaction::commit, RxTransaction::rollback ); - return session.retryLogic().retryRx( repeatableWork ); - } - - @Override - public RxResult run( String statement, TransactionConfig config ) - { - return run( new Statement( statement ), config ); - } - - @Override - public RxResult run( String statement, Map parameters, TransactionConfig config ) - { - return run( new Statement( statement, parameters ), config ); - } - - @Override - public RxResult run( Statement statement ) - { - return run( statement, TransactionConfig.empty() ); - } - - @Override - public RxResult run( Statement statement, TransactionConfig config ) - { - return new InternalRxResult( () -> { - CompletableFuture resultCursorFuture = new CompletableFuture<>(); - session.runRx( statement, config ).whenComplete( ( cursor, completionError ) -> { - if ( cursor != null ) - { - resultCursorFuture.complete( cursor ); - } - else - { - releaseConnectionBeforeReturning( resultCursorFuture, completionError ); - } - } ); - return resultCursorFuture; - } ); - } - - private void releaseConnectionBeforeReturning( CompletableFuture returnFuture, Throwable completionError ) - { - // We failed to create a result cursor so we cannot rely on result cursor to cleanup resources. - // Therefore we will first release the connection that might have been created in the session and then notify the error. - // The logic here shall be the same as `SessionPullResponseHandler#afterFailure`. - // The reason we need to release connection in session is that we do not have a `rxSession.close()`; - // Otherwise, session.close shall handle everything for us. - Throwable error = Futures.completionExceptionCause( completionError ); - session.releaseConnectionAsync().whenComplete( ( ignored, closeError ) -> - returnFuture.completeExceptionally( Futures.combineErrors( error, closeError ) ) ); - } - - @Override - public String lastBookmark() - { - return session.lastBookmark(); - } - - public Publisher reset() - { - return createEmptyPublisher( session::resetAsync ); - } - - @Override - public Publisher close() - { - return createEmptyPublisher( session::closeAsync ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxTransaction.java b/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxTransaction.java deleted file mode 100644 index 7a5322a4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/InternalRxTransaction.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
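From the caller's side, the removed InternalRxSession is reached only through the public RxSession/RxResult API, and nothing executes until the records publisher is subscribed. A hedged usage sketch; it assumes the driver exposes an rxSession() entry point as in the upstream 4.0 driver, and the query is a placeholder:

```java
import org.neo4j.driver.Driver;
import org.neo4j.driver.reactive.RxSession;
import reactor.core.publisher.Flux;

final class RxSessionUsage {
    // Illustrative reactive read; query text and property name are placeholders.
    static void printNames(Driver driver) {
        RxSession session = driver.rxSession();
        Flux.from(session.run("MATCH (n) RETURN n.name AS name").records())
            .map(record -> record.get("name").asString())
            .doFinally(signal -> Flux.from(session.close()).subscribe()) // release the session afterwards
            .subscribe(System.out::println);
    }
}
```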
- */ -package org.neo4j.driver.internal.reactive; - -import org.reactivestreams.Publisher; - -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.internal.async.ExplicitTransaction; -import org.neo4j.driver.internal.cursor.RxStatementResultCursor; -import org.neo4j.driver.internal.util.Futures; -import org.neo4j.driver.reactive.RxResult; -import org.neo4j.driver.reactive.RxTransaction; - -import static org.neo4j.driver.internal.reactive.RxUtils.createEmptyPublisher; - -public class InternalRxTransaction extends AbstractRxStatementRunner implements RxTransaction -{ - private final ExplicitTransaction tx; - - public InternalRxTransaction( ExplicitTransaction tx ) - { - this.tx = tx; - } - - @Override - public RxResult run( Statement statement ) - { - return new InternalRxResult( () -> { - CompletableFuture cursorFuture = new CompletableFuture<>(); - tx.runRx( statement ).whenComplete( ( cursor, completionError ) -> { - if ( cursor != null ) - { - cursorFuture.complete( cursor ); - } - else - { - // We failed to create a result cursor so we cannot rely on result cursor to handle failure. - // The logic here shall be the same as `TransactionPullResponseHandler#afterFailure` as that is where cursor handling failure - // This is optional as tx still holds a reference to all cursor futures and they will be clean up properly in commit - Throwable error = Futures.completionExceptionCause( completionError ); - tx.markTerminated(); - cursorFuture.completeExceptionally( error ); - } - } ); - return cursorFuture; - } ); - } - - @Override - public Publisher commit() - { - return close( true ); - } - - @Override - public Publisher rollback() - { - return close( false ); - } - - private Publisher close( boolean commit ) - { - return createEmptyPublisher( () -> { - if ( commit ) - { - return tx.commitAsync(); - } - else - { - return tx.rollbackAsync(); - } - } ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/RxUtils.java b/src/graiph-driver/java/org/neo4j/driver/internal/reactive/RxUtils.java deleted file mode 100644 index 9af9d1a2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/reactive/RxUtils.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.reactive; - -import org.reactivestreams.Publisher; -import reactor.core.publisher.Mono; - -import java.util.concurrent.CompletionStage; -import java.util.function.Supplier; - -import org.neo4j.driver.internal.util.Futures; - -public class RxUtils -{ - /** - * The publisher created by this method will either succeed without publishing anything or fail with an error. - * @param supplier supplies a {@link CompletionStage}. - * @return A publisher that publishes nothing on completion or fails with an error. 
- */ - public static Publisher createEmptyPublisher( Supplier> supplier ) - { - return Mono.create( sink -> supplier.get().whenComplete( ( ignore, completionError ) -> { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - sink.error( error ); - } - else - { - sink.success(); - } - } ) ); - } - - /** - * Create a {@link Mono} publisher from the given {@link CompletionStage} supplier. - * @param supplier supplies a {@link CompletionStage}. - * @param the type of the item to publish. - * @return A {@link Mono} publisher. - */ - public static Publisher createMono( Supplier> supplier ) - { - return Mono.create( sink -> supplier.get().whenComplete( ( item, completionError ) -> { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( item != null ) - { - sink.success( item ); - } - if ( error != null ) - { - sink.error( error ); - } - else - { - sink.success(); - } - } ) ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/retry/ExponentialBackoffRetryLogic.java b/src/graiph-driver/java/org/neo4j/driver/internal/retry/ExponentialBackoffRetryLogic.java deleted file mode 100644 index 6791d662..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/retry/ExponentialBackoffRetryLogic.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
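The RxUtils helpers above adapt a CompletionStage into a Reactor publisher, completing the sink with either the value or the unwrapped error. For the common case, the same adaptation can be sketched with Reactor's built-in operators — a simplification, not the driver's code:

```java
import reactor.core.publisher.Mono;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.Supplier;

final class StageToMono {
    // Defer the stage until subscription, then complete the Mono with its outcome.
    static <T> Mono<T> fromStage(Supplier<CompletionStage<T>> supplier) {
        return Mono.defer(() -> Mono.fromCompletionStage(supplier.get()));
    }

    public static void main(String[] args) {
        fromStage(() -> CompletableFuture.completedFuture("ok"))
                .subscribe(System.out::println); // prints "ok"
    }
}
```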
- */ -package org.neo4j.driver.internal.retry; - -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.EventExecutorGroup; -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; -import reactor.util.context.Context; -import reactor.util.function.Tuples; - -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.function.Supplier; - -import org.neo4j.driver.Logger; -import org.neo4j.driver.Logging; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.exceptions.SessionExpiredException; -import org.neo4j.driver.exceptions.TransientException; -import org.neo4j.driver.internal.util.Clock; -import org.neo4j.driver.internal.util.Futures; - -import static java.util.concurrent.TimeUnit.SECONDS; - -public class ExponentialBackoffRetryLogic implements RetryLogic -{ - private final static String RETRY_LOGIC_LOG_NAME = "RetryLogic"; - - static final long DEFAULT_MAX_RETRY_TIME_MS = SECONDS.toMillis( 30 ); - - private static final long INITIAL_RETRY_DELAY_MS = SECONDS.toMillis( 1 ); - private static final double RETRY_DELAY_MULTIPLIER = 2.0; - private static final double RETRY_DELAY_JITTER_FACTOR = 0.2; - private static final long MAX_RETRY_DELAY = Long.MAX_VALUE / 2; - - private final long maxRetryTimeMs; - private final long initialRetryDelayMs; - private final double multiplier; - private final double jitterFactor; - private final EventExecutorGroup eventExecutorGroup; - private final Clock clock; - private final Logger log; - - public ExponentialBackoffRetryLogic( RetrySettings settings, EventExecutorGroup eventExecutorGroup, Clock clock, - Logging logging ) - { - this( settings.maxRetryTimeMs(), INITIAL_RETRY_DELAY_MS, RETRY_DELAY_MULTIPLIER, RETRY_DELAY_JITTER_FACTOR, - eventExecutorGroup, clock, logging ); - } - - ExponentialBackoffRetryLogic( long maxRetryTimeMs, long initialRetryDelayMs, double multiplier, - double jitterFactor, EventExecutorGroup eventExecutorGroup, Clock clock, Logging logging ) - { - this.maxRetryTimeMs = maxRetryTimeMs; - this.initialRetryDelayMs = initialRetryDelayMs; - this.multiplier = multiplier; - this.jitterFactor = jitterFactor; - this.eventExecutorGroup = eventExecutorGroup; - this.clock = clock; - this.log = logging.getLog( RETRY_LOGIC_LOG_NAME ); - - verifyAfterConstruction(); - } - - @Override - public T retry( Supplier work ) - { - List errors = null; - long startTime = -1; - long nextDelayMs = initialRetryDelayMs; - - while ( true ) - { - try - { - return work.get(); - } - catch ( Throwable error ) - { - if ( canRetryOn( error ) ) - { - long currentTime = clock.millis(); - if ( startTime == -1 ) - { - startTime = currentTime; - } - - long elapsedTime = currentTime - startTime; - if ( elapsedTime < maxRetryTimeMs ) - { - long delayWithJitterMs = computeDelayWithJitter( nextDelayMs ); - log.warn( "Transaction failed and will be retried in " + delayWithJitterMs + "ms", error ); - - sleep( delayWithJitterMs ); - nextDelayMs = (long) (nextDelayMs * multiplier); - errors = recordError( error, errors ); - continue; - } - } - addSuppressed( error, errors ); - throw error; - } - } - } - - @Override - public CompletionStage retryAsync( Supplier> work ) 
- { - CompletableFuture resultFuture = new CompletableFuture<>(); - executeWorkInEventLoop( resultFuture, work ); - return resultFuture; - } - - @Override - public Publisher retryRx( Publisher work ) - { - return Flux.from( work ).retryWhen( retryRxCondition() ); - } - - protected boolean canRetryOn( Throwable error ) - { - return error instanceof SessionExpiredException || - error instanceof ServiceUnavailableException || - isTransientError( error ); - } - - private Function,Publisher> retryRxCondition() - { - return errorCurrentAttempt -> errorCurrentAttempt.flatMap( e -> Mono.subscriberContext().map( ctx -> Tuples.of( e, ctx ) ) ).flatMap( t2 -> { - Throwable lastError = t2.getT1(); - Context ctx = t2.getT2(); - - List errors = ctx.getOrDefault( "errors", null ); - - long startTime = ctx.getOrDefault( "startTime", -1L ); - long nextDelayMs = ctx.getOrDefault( "nextDelayMs", initialRetryDelayMs ); - - if( !canRetryOn( lastError ) ) - { - addSuppressed( lastError, errors ); - return Mono.error( lastError ); - } - - long currentTime = clock.millis(); - if ( startTime == -1 ) - { - startTime = currentTime; - } - - long elapsedTime = currentTime - startTime; - if ( elapsedTime < maxRetryTimeMs ) - { - long delayWithJitterMs = computeDelayWithJitter( nextDelayMs ); - log.warn( "Reactive transaction failed and is scheduled to retry in " + delayWithJitterMs + "ms", lastError ); - - nextDelayMs = (long) (nextDelayMs * multiplier); - errors = recordError( lastError, errors ); - - // retry on netty event loop thread - EventExecutor eventExecutor = eventExecutorGroup.next(); - return Mono.just( ctx.put( "errors", errors ).put( "startTime", startTime ).put( "nextDelayMs", nextDelayMs ) ) - .delayElement( Duration.ofMillis( delayWithJitterMs ), Schedulers.fromExecutorService( eventExecutor ) ); - } - else - { - addSuppressed( lastError, errors ); - - return Mono.error( lastError ); - } - } ); - } - - private void executeWorkInEventLoop( CompletableFuture resultFuture, Supplier> work ) - { - // this is the very first time we execute given work - EventExecutor eventExecutor = eventExecutorGroup.next(); - - eventExecutor.execute( () -> executeWork( resultFuture, work, -1, initialRetryDelayMs, null ) ); - } - - private void retryWorkInEventLoop( CompletableFuture resultFuture, Supplier> work, - Throwable error, long startTime, long delayMs, List errors ) - { - // work has failed before, we need to schedule retry with the given delay - EventExecutor eventExecutor = eventExecutorGroup.next(); - - long delayWithJitterMs = computeDelayWithJitter( delayMs ); - log.warn( "Async transaction failed and is scheduled to retry in " + delayWithJitterMs + "ms", error ); - - eventExecutor.schedule( () -> - { - long newRetryDelayMs = (long) (delayMs * multiplier); - executeWork( resultFuture, work, startTime, newRetryDelayMs, errors ); - }, delayWithJitterMs, TimeUnit.MILLISECONDS ); - } - - private void executeWork( CompletableFuture resultFuture, Supplier> work, - long startTime, long retryDelayMs, List errors ) - { - CompletionStage workStage; - try - { - workStage = work.get(); - } - catch ( Throwable error ) - { - // work failed in a sync way, attempt to schedule a retry - retryOnError( resultFuture, work, startTime, retryDelayMs, error, errors ); - return; - } - - workStage.whenComplete( ( result, completionError ) -> - { - Throwable error = Futures.completionExceptionCause( completionError ); - if ( error != null ) - { - // work failed in async way, attempt to schedule a retry - retryOnError( resultFuture, work, 
startTime, retryDelayMs, error, errors ); - } - else - { - resultFuture.complete( result ); - } - } ); - } - - private void retryOnError( CompletableFuture resultFuture, Supplier> work, - long startTime, long retryDelayMs, Throwable error, List errors ) - { - if ( canRetryOn( error ) ) - { - long currentTime = clock.millis(); - if ( startTime == -1 ) - { - startTime = currentTime; - } - - long elapsedTime = currentTime - startTime; - if ( elapsedTime < maxRetryTimeMs ) - { - errors = recordError( error, errors ); - retryWorkInEventLoop( resultFuture, work, error, startTime, retryDelayMs, errors ); - return; - } - } - - addSuppressed( error, errors ); - resultFuture.completeExceptionally( error ); - } - - private long computeDelayWithJitter( long delayMs ) - { - if ( delayMs > MAX_RETRY_DELAY ) - { - delayMs = MAX_RETRY_DELAY; - } - - long jitter = (long) (delayMs * jitterFactor); - long min = delayMs - jitter; - long max = delayMs + jitter; - return ThreadLocalRandom.current().nextLong( min, max + 1 ); - } - - private void sleep( long delayMs ) - { - try - { - clock.sleep( delayMs ); - } - catch ( InterruptedException e ) - { - Thread.currentThread().interrupt(); - throw new IllegalStateException( "Retries interrupted", e ); - } - } - - private void verifyAfterConstruction() - { - if ( maxRetryTimeMs < 0 ) - { - throw new IllegalArgumentException( "Max retry time should be >= 0: " + maxRetryTimeMs ); - } - if ( initialRetryDelayMs < 0 ) - { - throw new IllegalArgumentException( "Initial retry delay should >= 0: " + initialRetryDelayMs ); - } - if ( multiplier < 1.0 ) - { - throw new IllegalArgumentException( "Multiplier should be >= 1.0: " + multiplier ); - } - if ( jitterFactor < 0 || jitterFactor > 1 ) - { - throw new IllegalArgumentException( "Jitter factor should be in [0.0, 1.0]: " + jitterFactor ); - } - if ( clock == null ) - { - throw new IllegalArgumentException( "Clock should not be null" ); - } - } - - private static boolean isTransientError( Throwable error ) - { - if ( error instanceof TransientException ) - { - String code = ((TransientException) error).code(); - // Retries should not happen when transaction was explicitly terminated by the user. - // Termination of transaction might result in two different error codes depending on where it was - // terminated. These are really client errors but classification on the server is not entirely correct and - // they are classified as transient. - if ( "Neo.TransientError.Transaction.Terminated".equals( code ) || - "Neo.TransientError.Transaction.LockClientStopped".equals( code ) ) - { - return false; - } - return true; - } - return false; - } - - private static List recordError( Throwable error, List errors ) - { - if ( errors == null ) - { - errors = new ArrayList<>(); - } - errors.add( error ); - return errors; - } - - private static void addSuppressed( Throwable error, List suppressedErrors ) - { - if ( suppressedErrors != null ) - { - for ( Throwable suppressedError : suppressedErrors ) - { - if ( error != suppressedError ) - { - error.addSuppressed( suppressedError ); - } - } - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/retry/RetryLogic.java b/src/graiph-driver/java/org/neo4j/driver/internal/retry/RetryLogic.java deleted file mode 100644 index 332626d1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/retry/RetryLogic.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
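The removed ExponentialBackoffRetryLogic doubles the delay after each failed attempt and spreads it with ±20% jitter, giving up once the elapsed time exceeds the retry budget. A self-contained sketch of just that delay schedule, using the same constants as the deleted class:

```java
import java.util.concurrent.ThreadLocalRandom;

final class BackoffSchedule {
    static final double MULTIPLIER = 2.0;
    static final double JITTER_FACTOR = 0.2;

    // Next delay with jitter, as in computeDelayWithJitter above.
    static long withJitter(long delayMs) {
        long jitter = (long) (delayMs * JITTER_FACTOR);
        return ThreadLocalRandom.current().nextLong(delayMs - jitter, delayMs + jitter + 1);
    }

    public static void main(String[] args) {
        long delay = 1000; // initial retry delay of one second
        for (int attempt = 1; attempt <= 4; attempt++) {
            System.out.printf("attempt %d: ~%d ms%n", attempt, withJitter(delay));
            delay = (long) (delay * MULTIPLIER); // 1s, 2s, 4s, 8s ...
        }
    }
}
```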
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.retry; - -import org.reactivestreams.Publisher; - -import java.util.concurrent.CompletionStage; -import java.util.function.Supplier; - -public interface RetryLogic -{ - T retry( Supplier work ); - - CompletionStage retryAsync( Supplier> work ); - - Publisher retryRx( Publisher work ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/retry/RetrySettings.java b/src/graiph-driver/java/org/neo4j/driver/internal/retry/RetrySettings.java deleted file mode 100644 index 741b85e1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/retry/RetrySettings.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.retry; - -public final class RetrySettings -{ - public static final RetrySettings DEFAULT = - new RetrySettings( ExponentialBackoffRetryLogic.DEFAULT_MAX_RETRY_TIME_MS ); - - private final long maxRetryTimeMs; - - public RetrySettings( long maxRetryTimeMs ) - { - this.maxRetryTimeMs = maxRetryTimeMs; - } - - public long maxRetryTimeMs() - { - return maxRetryTimeMs; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/security/InternalAuthToken.java b/src/graiph-driver/java/org/neo4j/driver/internal/security/InternalAuthToken.java deleted file mode 100644 index 7d9c9f7a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/security/InternalAuthToken.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.security; - -import java.util.Map; - -import org.neo4j.driver.AuthToken; -import org.neo4j.driver.Value; - -/** - * A simple common token for authentication schemes that easily convert to - * an auth token map - */ -public class InternalAuthToken implements AuthToken -{ - public static final String SCHEME_KEY = "scheme"; - public static final String PRINCIPAL_KEY = "principal"; - public static final String CREDENTIALS_KEY = "credentials"; - public static final String REALM_KEY = "realm"; - public static final String PARAMETERS_KEY = "parameters"; - - private final Map content; - - public InternalAuthToken( Map content ) - { - this.content = content; - } - - public Map toMap() - { - return content; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { return true; } - if ( o == null || getClass() != o.getClass() ) - { return false; } - - InternalAuthToken that = (InternalAuthToken) o; - - return content != null ? content.equals( that.content ) : that.content == null; - - } - - @Override - public int hashCode() - { - return content != null ? content.hashCode() : 0; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/security/SecurityPlan.java b/src/graiph-driver/java/org/neo4j/driver/internal/security/SecurityPlan.java deleted file mode 100644 index 322dafbb..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/security/SecurityPlan.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.security; - -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.KeyManagementException; -import java.security.KeyStore; -import java.security.NoSuchAlgorithmException; -import javax.net.ssl.KeyManager; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.Logger; - -import static org.neo4j.driver.internal.util.CertificateTool.loadX509Cert; - -/** - * A SecurityPlan consists of encryption and trust details. 
- */ -public class SecurityPlan -{ - public static SecurityPlan forAllCertificates( boolean requiresHostnameVerification ) throws GeneralSecurityException - { - SSLContext sslContext = SSLContext.getInstance( "TLS" ); - sslContext.init( new KeyManager[0], new TrustManager[]{new TrustAllTrustManager()}, null ); - - return new SecurityPlan( true, sslContext, true, requiresHostnameVerification ); - } - - public static SecurityPlan forCustomCASignedCertificates( File certFile, boolean requiresHostnameVerification ) - throws GeneralSecurityException, IOException - { - // A certificate file is specified so we will load the certificates in the file - // Init a in memory TrustedKeyStore - KeyStore trustedKeyStore = KeyStore.getInstance( "JKS" ); - trustedKeyStore.load( null, null ); - - // Load the certs from the file - loadX509Cert( certFile, trustedKeyStore ); - - // Create TrustManager from TrustedKeyStore - TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance( "SunX509" ); - trustManagerFactory.init( trustedKeyStore ); - - SSLContext sslContext = SSLContext.getInstance( "TLS" ); - sslContext.init( new KeyManager[0], trustManagerFactory.getTrustManagers(), null ); - - return new SecurityPlan( true, sslContext, true, requiresHostnameVerification ); - } - - public static SecurityPlan forSystemCASignedCertificates( boolean requiresHostnameVerification ) throws NoSuchAlgorithmException - { - return new SecurityPlan( true, SSLContext.getDefault(), true, requiresHostnameVerification ); - } - - @Deprecated - public static SecurityPlan forTrustOnFirstUse( File knownHosts, boolean requiresHostnameVerification, BoltServerAddress address, Logger logger ) - throws IOException, KeyManagementException, NoSuchAlgorithmException - { - SSLContext sslContext = SSLContext.getInstance( "TLS" ); - sslContext.init( new KeyManager[0], new TrustManager[]{new TrustOnFirstUseTrustManager( address, knownHosts, logger )}, null ); - - return new SecurityPlan( true, sslContext, false, requiresHostnameVerification ); - } - - public static SecurityPlan insecure() - { - return new SecurityPlan( false, null, true, false ); - } - - private final boolean requiresEncryption; - private final SSLContext sslContext; - private final boolean routingCompatible; - private final boolean requiresHostnameVerification; - - private SecurityPlan( boolean requiresEncryption, SSLContext sslContext, boolean routingCompatible, boolean requiresHostnameVerification ) - { - this.requiresEncryption = requiresEncryption; - this.sslContext = sslContext; - this.routingCompatible = routingCompatible; - this.requiresHostnameVerification = requiresHostnameVerification; - } - - public boolean requiresEncryption() - { - return requiresEncryption; - } - - public boolean isRoutingCompatible() - { - return routingCompatible; - } - - public SSLContext sslContext() - { - return sslContext; - } - - public boolean requiresHostnameVerification() - { - return requiresHostnameVerification; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/security/TrustAllTrustManager.java b/src/graiph-driver/java/org/neo4j/driver/internal/security/TrustAllTrustManager.java deleted file mode 100644 index fec650c4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/security/TrustAllTrustManager.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.security; - -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import javax.net.ssl.X509TrustManager; - -public class TrustAllTrustManager implements X509TrustManager -{ - public void checkClientTrusted( X509Certificate[] chain, String authType ) throws CertificateException - { - throw new CertificateException( "All client connections to this client are forbidden." ); - } - - public void checkServerTrusted( X509Certificate[] chain, String authType ) throws CertificateException - { - // all fine, pass through - } - - public X509Certificate[] getAcceptedIssuers() - { - return new X509Certificate[0]; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/security/TrustOnFirstUseTrustManager.java b/src/graiph-driver/java/org/neo4j/driver/internal/security/TrustOnFirstUseTrustManager.java deleted file mode 100644 index ec415a00..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/security/TrustOnFirstUseTrustManager.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.security; - -import io.netty.buffer.ByteBufUtil; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import javax.net.ssl.X509TrustManager; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.Logger; - -import static java.lang.String.format; -import static org.neo4j.driver.internal.util.CertificateTool.X509CertToString; - -/** - * References: - * http://stackoverflow.com/questions/6802421/how-to-compare-distinct-implementations-of-java-security-cert-x509certificate?answertab=votes#tab-top - */ -public class TrustOnFirstUseTrustManager implements X509TrustManager -{ - /** - * A list of pairs (known_server certificate) are stored in this file. - * When establishing a SSL connection to a new server, we will save the server's host:port and its certificate in this - * file. 
- * Then when we try to connect to a known server again, we will authenticate the server by checking if it provides - * the same certificate as the one saved in this file. - */ - private final File knownHosts; - - /** The server ip:port (in digits) of the server that we are currently connected to */ - private final String serverId; - private final Logger logger; - - /** The known certificate we've registered for this server */ - private String fingerprint; - - TrustOnFirstUseTrustManager( BoltServerAddress address, File knownHosts, Logger logger ) throws IOException - { - this.logger = logger; - this.serverId = address.toString(); - this.knownHosts = knownHosts; - load(); - } - - /** - * Try to load the certificate form the file if the server we've connected is a known server. - * - * @throws IOException - */ - private void load() throws IOException - { - if ( !knownHosts.exists() ) - { - return; - } - - assertKnownHostFileReadable(); - - try ( BufferedReader reader = new BufferedReader( new FileReader( knownHosts ) ) ) - { - String line; - while ( (line = reader.readLine()) != null ) - { - if ( (!line.trim().startsWith( "#" )) ) - { - String[] strings = line.split( " " ); - if ( strings[0].trim().equals( serverId ) ) - { - // load the certificate - fingerprint = strings[1].trim(); - return; - } - } - } - } - } - - /** - * Save a new (server_ip, cert) pair into knownHosts file - * - * @param fingerprint the SHA-512 fingerprint of the host certificate - */ - private void saveTrustedHost( String fingerprint ) throws IOException - { - this.fingerprint = fingerprint; - - logger.info( "Adding %s as known and trusted certificate for %s.", fingerprint, serverId ); - createKnownCertFileIfNotExists(); - - assertKnownHostFileWritable(); - try ( BufferedWriter writer = new BufferedWriter( new FileWriter( knownHosts, true ) ) ) - { - writer.write( serverId + " " + this.fingerprint ); - writer.newLine(); - } - } - - - private void assertKnownHostFileReadable() throws IOException - { - if( !knownHosts.canRead() ) - { - throw new IOException( format( - "Failed to load certificates from file %s as you have no read permissions to it.\n" + - "Try configuring the Neo4j driver to use a file system location you do have read permissions to.", - knownHosts.getAbsolutePath() - ) ); - } - } - - private void assertKnownHostFileWritable() throws IOException - { - if( !knownHosts.canWrite() ) - { - throw new IOException( format( - "Failed to write certificates to file %s as you have no write permissions to it.\n" + - "Try configuring the Neo4j driver to use a file system location you do have write permissions to.", - knownHosts.getAbsolutePath() - ) ); - } - } - - /* - * Disallow all client connection to this client - */ - public void checkClientTrusted( X509Certificate[] chain, String authType ) - throws CertificateException - { - throw new CertificateException( "All client connections to this client are forbidden." ); - } - - /* - * Trust the cert if it is seen first time for this server or it is the same with the one registered. 
- */ - public void checkServerTrusted( X509Certificate[] chain, String authType ) - throws CertificateException - { - X509Certificate certificate = chain[0]; - - String cert = fingerprint( certificate ); - - if ( this.fingerprint == null ) - { - try - { - saveTrustedHost( cert ); - } - catch ( IOException e ) - { - throw new CertificateException( format( - "Failed to save the server ID and the certificate received from the server to file %s.\n" + - "Server ID: %s\nReceived cert:\n%s", - knownHosts.getAbsolutePath(), serverId, X509CertToString( cert ) ), e ); - } - } - else - { - if ( !this.fingerprint.equals( cert ) ) - { - throw new CertificateException( format( - "Unable to connect to neo4j at `%s`, because the certificate the server uses has changed. " + - "This is a security feature to protect against man-in-the-middle attacks.\n" + - "If you trust the certificate the server uses now, simply remove the line that starts with " + - "`%s` " + - "in the file `%s`.\n" + - "The old certificate saved in file is:\n%sThe New certificate received is:\n%s", - serverId, serverId, knownHosts.getAbsolutePath(), - X509CertToString( this.fingerprint ), X509CertToString( cert ) ) ); - } - } - } - - /** - * Calculate the certificate fingerprint - simply the SHA-512 hash of the DER-encoded certificate. - */ - public static String fingerprint( X509Certificate cert ) throws CertificateException - { - try - { - MessageDigest md = MessageDigest.getInstance( "SHA-512" ); - md.update( cert.getEncoded() ); - return ByteBufUtil.hexDump( md.digest() ); - } - catch( NoSuchAlgorithmException e ) - { - // SHA-1 not available - throw new CertificateException( "Cannot use TLS on this platform, because SHA-512 message digest algorithm is not available: " + e.getMessage(), e ); - } - } - - private File createKnownCertFileIfNotExists() throws IOException - { - if ( !knownHosts.exists() ) - { - File parentDir = knownHosts.getParentFile(); - try - { - if ( parentDir != null && !parentDir.exists() ) - { - if ( !parentDir.mkdirs() ) - { - throw new IOException( "Failed to create directories for the known hosts file in " + knownHosts.getAbsolutePath() + - ". This is usually because you do not have write permissions to the directory. " + - "Try configuring the Neo4j driver to use a file system location you do have write permissions to." ); - } - } - if ( !knownHosts.createNewFile() ) - { - throw new IOException( "Failed to create a known hosts file at " + knownHosts.getAbsolutePath() + - ". This is usually because you do not have write permissions to the directory. " + - "Try configuring the Neo4j driver to use a file system location you do have write permissions to." ); - } - } - catch( SecurityException e ) - { - throw new IOException( "Failed to create known host file and/or parent directories at " + knownHosts.getAbsolutePath() + - ". This is usually because you do not have write permission to the directory. " + - "Try configuring the Neo4j driver to use a file location you have write permissions to." ); - } - BufferedWriter writer = new BufferedWriter( new FileWriter( knownHosts ) ); - writer.write( "# This file contains trusted certificates for Neo4j servers, it's created by Neo4j drivers." ); - writer.newLine(); - writer.write( "# You can configure the location of this file in `org.neo4j.driver.Config`" ); - writer.newLine(); - writer.close(); - } - - return knownHosts; - } - - /** - * No issuer is trusted. 
- */ - public X509Certificate[] getAcceptedIssuers() - { - return new X509Certificate[0]; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/spi/Connection.java b/src/graiph-driver/java/org/neo4j/driver/internal/spi/Connection.java deleted file mode 100644 index 9a53c77c..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/spi/Connection.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.spi; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.messaging.BoltProtocol; -import org.neo4j.driver.internal.messaging.Message; -import org.neo4j.driver.internal.util.ServerVersion; - -import static java.lang.String.format; - -public interface Connection -{ - boolean isOpen(); - - void enableAutoRead(); - - void disableAutoRead(); - - void write( Message message, ResponseHandler handler ); - - void write( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ); - - void writeAndFlush( Message message, ResponseHandler handler ); - - void writeAndFlush( Message message1, ResponseHandler handler1, Message message2, ResponseHandler handler2 ); - - CompletionStage reset(); - - CompletionStage release(); - - void terminateAndRelease( String reason ); - - BoltServerAddress serverAddress(); - - ServerVersion serverVersion(); - - BoltProtocol protocol(); - - default AccessMode mode() - { - throw new UnsupportedOperationException( format( "%s does not support access mode.", getClass() ) ); - } - - default String databaseName() - { - throw new UnsupportedOperationException( format( "%s does not support database name.", getClass() ) ); - } - - void flush(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/spi/ConnectionPool.java b/src/graiph-driver/java/org/neo4j/driver/internal/spi/ConnectionPool.java deleted file mode 100644 index 4a73bf8d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/spi/ConnectionPool.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.spi; - -import java.util.Set; -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.internal.BoltServerAddress; - -public interface ConnectionPool -{ - CompletionStage acquire( BoltServerAddress address ); - - void retainAll( Set addressesToRetain ); - - int inUseConnections( BoltServerAddress address ); - - int idleConnections( BoltServerAddress address ); - - CompletionStage close(); - - boolean isOpen( BoltServerAddress address ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/spi/ConnectionProvider.java b/src/graiph-driver/java/org/neo4j/driver/internal/spi/ConnectionProvider.java deleted file mode 100644 index 78d19cf8..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/spi/ConnectionProvider.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.spi; - -import java.util.concurrent.CompletionStage; - -import org.neo4j.driver.AccessMode; - -/** - * Interface defines a layer used by the driver to obtain connections. It is meant to be the only component that - * differs between "direct" and "routing" driver. - */ -public interface ConnectionProvider -{ - CompletionStage acquireConnection( String databaseName, AccessMode mode ); - - /** - * The validation of connectivity will happen with the default database. - */ - CompletionStage verifyConnectivity(); - - CompletionStage close(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/spi/ResponseHandler.java b/src/graiph-driver/java/org/neo4j/driver/internal/spi/ResponseHandler.java deleted file mode 100644 index 36ecfb4e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/spi/ResponseHandler.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.spi; - -import java.util.Map; - -import org.neo4j.driver.internal.async.inbound.InboundMessageDispatcher; -import org.neo4j.driver.Value; - -public interface ResponseHandler -{ - void onSuccess( Map metadata ); - - void onFailure( Throwable error ); - - void onRecord( Value[] fields ); - - /** - * Tells whether this response handler is able to manage auto-read of the underlying connection using {@link Connection#enableAutoRead()} and - * {@link Connection#disableAutoRead()}. - *

- * Implementations can use auto-read management to apply network-level backpressure when receiving a stream of records. - * There should only be a single such handler active for a connection at one point in time. Otherwise, handlers can interfere and turn on/off auto-read - * racing with each other. {@link InboundMessageDispatcher} is responsible for tracking these handlers and disabling auto-read management to maintain just - * a single auto-read managing handler per connection. - */ - default boolean canManageAutoRead() - { - return false; - } - - /** - * If this response handler is able to manage auto-read of the underlying connection, then this method signals it to - * stop changing auto-read setting for the connection. - */ - default void disableAutoReadManagement() - { - - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalInputPosition.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalInputPosition.java deleted file mode 100644 index 1a708ae5..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalInputPosition.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.summary; - -import java.util.Objects; - -import org.neo4j.driver.summary.InputPosition; - -/** - * An input position refers to a specific point in a query string. - */ -public class InternalInputPosition implements InputPosition -{ - private final int offset; - private final int line; - private final int column; - - /** - * Creating a position from and offset, line number and a column number. - * - * @param offset the offset from the start of the string, starting from 0. - * @param line the line number, starting from 1. - * @param column the column number, starting from 1. 
- */ - public InternalInputPosition( int offset, int line, int column ) - { - this.offset = offset; - this.line = line; - this.column = column; - } - - @Override - public int offset() - { - return offset; - } - - @Override - public int line() - { - return line; - } - - @Override - public int column() - { - return column; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - InternalInputPosition that = (InternalInputPosition) o; - return offset == that.offset && - line == that.line && - column == that.column; - } - - @Override - public int hashCode() - { - return Objects.hash( offset, line, column ); - } - - @Override - public String toString() - { - return "offset=" + offset + ", line=" + line + ", column=" + column; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalNotification.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalNotification.java deleted file mode 100644 index 1caaffd6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalNotification.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.summary; - -import java.util.function.Function; -import org.neo4j.driver.summary.InputPosition; -import org.neo4j.driver.summary.Notification; -import org.neo4j.driver.Value; - -import static org.neo4j.driver.internal.value.NullValue.NULL; - -public class InternalNotification implements Notification -{ - public static final Function VALUE_TO_NOTIFICATION = new Function() - { - @Override - public Notification apply( Value value ) - { - String code = value.get( "code" ).asString(); - String title = value.get( "title" ).asString(); - String description = value.get( "description" ).asString(); - String severity = value.containsKey( "severity" ) ? 
- value.get( "severity" ).asString() - : "N/A"; - - Value posValue = value.get( "position" ); - InputPosition position = null; - if( posValue != NULL ) - { - position = new InternalInputPosition( posValue.get( "offset" ).asInt(), - posValue.get( "line" ).asInt(), - posValue.get( "column" ).asInt() ); - } - - return new InternalNotification( code, title, description, severity, position ); - } - }; - - private final String code; - private final String title; - private final String description; - private final String severity; - private final InputPosition position; - - public InternalNotification( String code, String title, String description, String severity, InputPosition position ) - { - this.code = code; - this.title = title; - this.description = description; - this.severity = severity; - this.position = position; - } - - @Override - public String code() - { - return code; - } - - @Override - public String title() - { - return title; - } - - @Override - public String description() - { - return description; - } - - @Override - public InputPosition position() - { - return position; - } - - @Override - public String severity() - { - return severity; - } - - @Override - public String toString() - { - String info = "code=" + code + ", title=" + title + ", description=" + description + ", severity=" + severity; - return position == null ? info : info + ", position={" + position + "}"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalPlan.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalPlan.java deleted file mode 100644 index dbe842e0..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalPlan.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.summary; - -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.summary.Plan; -import java.util.function.Function; - -import static java.lang.String.format; -import static org.neo4j.driver.Values.ofString; - -public class InternalPlan implements Plan -{ - private final String operatorType; - private final List identifiers; - private final Map arguments; - private final List children; - - // Only call when sub-classing, for constructing plans, use .plan instead - protected InternalPlan( - String operatorType, - Map arguments, - List identifiers, - List children ) - { - this.operatorType = operatorType; - this.identifiers = identifiers; - this.arguments = arguments; - this.children = children; - } - - @Override - public String operatorType() - { - return operatorType; - } - - @Override - public List identifiers() - { - return identifiers; - } - - @Override - public Map arguments() - { - return arguments; - } - - @Override - public List children() - { - return children; - } - - @Override - public String toString() - { - return format( - "SimplePlanTreeNode{operatorType='%s', arguments=%s, identifiers=%s, children=%s}", - operatorType, arguments, identifiers, children - ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - InternalPlan that = (InternalPlan) o; - - return operatorType.equals( that.operatorType ) - && arguments.equals( that.arguments ) - && identifiers.equals( that.identifiers ) - && children.equals( that.children ); - } - - @Override - public int hashCode() - { - int result = operatorType.hashCode(); - result = 31 * result + identifiers.hashCode(); - result = 31 * result + arguments.hashCode(); - result = 31 * result + children.hashCode(); - return result; - } - - public static Plan plan( - String operatorType, - Map arguments, - List identifiers, - List children ) - { - return EXPLAIN_PLAN.create( operatorType, arguments, identifiers, children, null ); - } - - public static final PlanCreator EXPLAIN_PLAN = new PlanCreator() - { - @Override - public Plan create( String operatorType, Map arguments, List identifiers, List children, Value originalPlanValue ) - { - return new InternalPlan<>( operatorType, arguments, identifiers, children ); - } - }; - - /** Builds a regular plan without profiling information - eg. a plan that came as a result of an `EXPLAIN` statement */ - public static final Function EXPLAIN_PLAN_FROM_VALUE = new Converter<>(EXPLAIN_PLAN); - - /** - * Since a plan with or without profiling looks almost the same, we just keep two impls. of this - * around to contain the small difference, and share the rest of the code for building plan trees. - * @param - */ - interface PlanCreator - { - T create( String operatorType, - Map arguments, - List identifiers, - List children, - Value originalPlanValue ); - } - - static class Converter implements Function - { - private final PlanCreator planCreator; - - public Converter( PlanCreator planCreator ) - { - this.planCreator = planCreator; - } - - @Override - public T apply( Value plan ) - { - final String operatorType = plan.get( "operatorType" ).asString(); - - final Value argumentsValue = plan.get( "args" ); - final Map arguments = argumentsValue.isNull() - ? 
Collections.emptyMap() - : argumentsValue.asMap( Values.ofValue() ); - - final Value identifiersValue = plan.get( "identifiers" ); - final List identifiers = identifiersValue.isNull() - ? Collections.emptyList() - : identifiersValue.asList( ofString() ); - - final Value childrenValue = plan.get( "children" ); - final List children = childrenValue.isNull() - ? Collections.emptyList() - : childrenValue.asList( this ); - - return planCreator.create( operatorType, arguments, identifiers, children, plan ); - } - } -} - diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalProfiledPlan.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalProfiledPlan.java deleted file mode 100644 index 910c5864..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalProfiledPlan.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.summary; - -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.Value; -import org.neo4j.driver.summary.ProfiledPlan; -import java.util.function.Function; - -public class InternalProfiledPlan extends InternalPlan implements ProfiledPlan -{ - private final long dbHits; - private final long records; - - protected InternalProfiledPlan( String operatorType, Map arguments, - List identifiers, List children, long dbHits, long records ) - { - super( operatorType, arguments, identifiers, children ); - this.dbHits = dbHits; - this.records = records; - } - - @Override - public long dbHits() - { - return dbHits; - } - - @Override - public long records() - { - return records; - } - - public static final PlanCreator PROFILED_PLAN = new PlanCreator() - { - @Override - public ProfiledPlan create( String operatorType, Map arguments, List identifiers, List children, Value originalPlanValue ) - { - return new InternalProfiledPlan( operatorType, arguments, identifiers, children, - originalPlanValue.get( "dbHits" ).asLong(), - originalPlanValue.get( "rows" ).asLong() ); - } - }; - - /** Builds a regular plan without profiling information - eg. a plan that came as a result of an `EXPLAIN` statement */ - public static final Function PROFILED_PLAN_FROM_VALUE = new Converter<>(PROFILED_PLAN); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalResultSummary.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalResultSummary.java deleted file mode 100644 index bab1155a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalResultSummary.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.summary; - -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.TimeUnit; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.summary.Notification; -import org.neo4j.driver.summary.Plan; -import org.neo4j.driver.summary.ProfiledPlan; -import org.neo4j.driver.summary.ResultSummary; -import org.neo4j.driver.summary.ServerInfo; -import org.neo4j.driver.summary.StatementType; -import org.neo4j.driver.summary.SummaryCounters; - -public class InternalResultSummary implements ResultSummary -{ - private final Statement statement; - private final ServerInfo serverInfo; - private final StatementType statementType; - private final SummaryCounters counters; - private final Plan plan; - private final ProfiledPlan profile; - private final List notifications; - private final long resultAvailableAfter; - private final long resultConsumedAfter; - - public InternalResultSummary( Statement statement, ServerInfo serverInfo, StatementType statementType, - SummaryCounters counters, Plan plan, ProfiledPlan profile, List notifications, - long resultAvailableAfter, long resultConsumedAfter ) - { - this.statement = statement; - this.serverInfo = serverInfo; - this.statementType = statementType; - this.counters = counters; - this.plan = resolvePlan( plan, profile ); - this.profile = profile; - this.notifications = notifications; - this.resultAvailableAfter = resultAvailableAfter; - this.resultConsumedAfter = resultConsumedAfter; - } - - @Override - public Statement statement() - { - return statement; - } - - @Override - public SummaryCounters counters() - { - return counters == null ? InternalSummaryCounters.EMPTY_STATS : counters; - } - - @Override - public StatementType statementType() - { - return statementType; - } - - @Override - public boolean hasPlan() - { - return plan != null; - } - - @Override - public boolean hasProfile() - { - return profile != null; - } - - @Override - public Plan plan() - { - return plan; - } - - @Override - public ProfiledPlan profile() - { - return profile; - } - - @Override - public List notifications() - { - return notifications == null ? Collections.emptyList() : notifications; - } - - @Override - public long resultAvailableAfter( TimeUnit unit ) - { - return resultAvailableAfter == -1 ? resultAvailableAfter - : unit.convert( resultAvailableAfter, TimeUnit.MILLISECONDS ); - } - - @Override - public long resultConsumedAfter( TimeUnit unit ) - { - return resultConsumedAfter == -1 ? 
resultConsumedAfter - : unit.convert( resultConsumedAfter, TimeUnit.MILLISECONDS ); - } - - @Override - public ServerInfo server() - { - return serverInfo; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - InternalResultSummary that = (InternalResultSummary) o; - return resultAvailableAfter == that.resultAvailableAfter && - resultConsumedAfter == that.resultConsumedAfter && - Objects.equals( statement, that.statement ) && - Objects.equals( serverInfo, that.serverInfo ) && - statementType == that.statementType && - Objects.equals( counters, that.counters ) && - Objects.equals( plan, that.plan ) && - Objects.equals( profile, that.profile ) && - Objects.equals( notifications, that.notifications ); - } - - @Override - public int hashCode() - { - return Objects.hash( statement, serverInfo, statementType, counters, plan, profile, notifications, - resultAvailableAfter, resultConsumedAfter ); - } - - @Override - public String toString() - { - return "InternalResultSummary{" + - "statement=" + statement + - ", serverInfo=" + serverInfo + - ", statementType=" + statementType + - ", counters=" + counters + - ", plan=" + plan + - ", profile=" + profile + - ", notifications=" + notifications + - ", resultAvailableAfter=" + resultAvailableAfter + - ", resultConsumedAfter=" + resultConsumedAfter + - '}'; - } - - /** - * Profiled plan is a superset of plan. This method returns profiled plan if plan is {@code null}. - * - * @param plan the given plan, possibly {@code null}. - * @param profiledPlan the given profiled plan, possibly {@code null}. - * @return available plan. - */ - private static Plan resolvePlan( Plan plan, ProfiledPlan profiledPlan ) - { - return plan == null ? profiledPlan : plan; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalServerInfo.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalServerInfo.java deleted file mode 100644 index 34c41000..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalServerInfo.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.summary; - -import org.neo4j.driver.internal.BoltServerAddress; -import org.neo4j.driver.internal.util.ServerVersion; -import org.neo4j.driver.summary.ServerInfo; - -public class InternalServerInfo implements ServerInfo -{ - private final String address; - private final String version; - - public InternalServerInfo( BoltServerAddress address, ServerVersion version ) - { - this.address = address.toString(); - this.version = version.toString(); - } - - @Override - public String address() - { - return address; - } - - @Override - public String version() - { - return version; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalSummaryCounters.java b/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalSummaryCounters.java deleted file mode 100644 index 33e68690..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/summary/InternalSummaryCounters.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.summary; - -import org.neo4j.driver.summary.SummaryCounters; - -public class InternalSummaryCounters implements SummaryCounters -{ - public static final InternalSummaryCounters EMPTY_STATS = - new InternalSummaryCounters( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ); - private final int nodesCreated; - private final int nodesDeleted; - private final int relationshipsCreated; - private final int relationshipsDeleted; - private final int propertiesSet; - private final int labelsAdded; - private final int labelsRemoved; - private final int indexesAdded; - private final int indexesRemoved; - private final int constraintsAdded; - private final int constraintsRemoved; - - public InternalSummaryCounters( - int nodesCreated, int nodesDeleted, - int relationshipsCreated, int relationshipsDeleted, - int propertiesSet, - int labelsAdded, int labelsRemoved, - int indexesAdded, int indexesRemoved, - int constraintsAdded, int constraintsRemoved ) - { - this.nodesCreated = nodesCreated; - this.nodesDeleted = nodesDeleted; - this.relationshipsCreated = relationshipsCreated; - this.relationshipsDeleted = relationshipsDeleted; - this.propertiesSet = propertiesSet; - this.labelsAdded = labelsAdded; - this.labelsRemoved = labelsRemoved; - this.indexesAdded = indexesAdded; - this.indexesRemoved = indexesRemoved; - this.constraintsAdded = constraintsAdded; - this.constraintsRemoved = constraintsRemoved; - } - - @Override - public boolean containsUpdates() - { - return - isPositive( nodesCreated ) - || isPositive( nodesDeleted ) - || isPositive( relationshipsCreated ) - || isPositive( relationshipsDeleted ) - || isPositive( propertiesSet ) - || isPositive( labelsAdded ) - || isPositive( labelsRemoved ) - || isPositive( indexesAdded ) - || isPositive( indexesRemoved ) - || isPositive( constraintsAdded ) - || isPositive( constraintsRemoved ); - } - 
- @Override - public int nodesCreated() - { - return nodesCreated; - } - - @Override - public int nodesDeleted() - { - return nodesDeleted; - } - - @Override - public int relationshipsCreated() - { - return relationshipsCreated; - } - - @Override - public int relationshipsDeleted() - { - return relationshipsDeleted; - } - - @Override - public int propertiesSet() - { - return propertiesSet; - } - - @Override - public int labelsAdded() - { - return labelsAdded; - } - - @Override - public int labelsRemoved() - { - return labelsRemoved; - } - - @Override - public int indexesAdded() - { - return indexesAdded; - } - - @Override - public int indexesRemoved() - { - return indexesRemoved; - } - - @Override - public int constraintsAdded() - { - return constraintsAdded; - } - - @Override - public int constraintsRemoved() - { - return constraintsRemoved; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - InternalSummaryCounters that = (InternalSummaryCounters) o; - - return nodesCreated == that.nodesCreated - && nodesDeleted == that.nodesDeleted - && relationshipsCreated == that.relationshipsCreated - && relationshipsDeleted == that.relationshipsDeleted - && propertiesSet == that.propertiesSet - && labelsAdded == that.labelsAdded - && labelsRemoved == that.labelsRemoved - && indexesAdded == that.indexesAdded - && indexesRemoved == that.indexesRemoved - && constraintsAdded == that.constraintsAdded - && constraintsRemoved == that.constraintsRemoved; - } - - @Override - public int hashCode() - { - int result = nodesCreated; - result = 31 * result + nodesDeleted; - result = 31 * result + relationshipsCreated; - result = 31 * result + relationshipsDeleted; - result = 31 * result + propertiesSet; - result = 31 * result + labelsAdded; - result = 31 * result + labelsRemoved; - result = 31 * result + indexesAdded; - result = 31 * result + indexesRemoved; - result = 31 * result + constraintsAdded; - result = 31 * result + constraintsRemoved; - return result; - } - - private boolean isPositive( int value ) - { - return value > 0; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/types/InternalMapAccessorWithDefaultValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/types/InternalMapAccessorWithDefaultValue.java deleted file mode 100644 index 65f449ff..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/types/InternalMapAccessorWithDefaultValue.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.types; - -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.internal.AsValue; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.types.Entity; -import org.neo4j.driver.types.MapAccessorWithDefaultValue; -import org.neo4j.driver.types.Node; -import org.neo4j.driver.types.Path; -import org.neo4j.driver.types.Relationship; -import java.util.function.Function; - -public abstract class InternalMapAccessorWithDefaultValue implements MapAccessorWithDefaultValue -{ - public abstract Value get( String key ); - - @Override - public Value get( String key, Value defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Value get( Value value, Value defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return ((AsValue) value).asValue(); - } - } - - @Override - public Object get( String key, Object defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Object get(Value value, Object defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asObject(); - } - } - - @Override - public Number get( String key, Number defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Number get( Value value, Number defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asNumber(); - } - } - - @Override - public Entity get( String key, Entity defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Entity get( Value value, Entity defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asEntity(); - } - } - - @Override - public Node get( String key, Node defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Node get( Value value, Node defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asNode(); - } - } - - @Override - public Path get( String key, Path defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Path get( Value value, Path defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asPath(); - } - } - - @Override - public Relationship get( String key, Relationship defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Relationship get( Value value, Relationship defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asRelationship(); - } - } - - @Override - public List get( String key, List defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private List get( Value value, List defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asList(); - } - } - - @Override - public List get( String key, List defaultValue, Function mapFunc ) - { - return get( get( key ), defaultValue, mapFunc ); - } - - private List get( Value value, List defaultValue, Function mapFunc ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asList( mapFunc ); - } - } - - @Override - public Map get( String key, Map defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private Map get( Value value, Map defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - 
else - { - return value.asMap(); - } - } - - @Override - public Map get( String key, Map defaultValue, Function mapFunc ) - { - return get( get( key ), defaultValue, mapFunc ); - } - - private Map get( Value value, Map defaultValue, Function mapFunc ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asMap( mapFunc ); - } - } - - @Override - public int get( String key, int defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private int get( Value value, int defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asInt(); - } - } - - @Override - public long get( String key, long defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private long get( Value value, long defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asLong(); - } - } - - @Override - public boolean get( String key, boolean defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private boolean get( Value value, boolean defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asBoolean(); - } - } - - @Override - public String get( String key, String defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private String get( Value value, String defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asString(); - } - } - - @Override - public float get( String key, float defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private float get( Value value, float defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asFloat(); - } - } - - @Override - public double get( String key, double defaultValue ) - { - return get( get( key ), defaultValue ); - } - - private double get( Value value, double defaultValue ) - { - if( value.equals( Values.NULL ) ) - { - return defaultValue; - } - else - { - return value.asDouble(); - } - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/types/InternalTypeSystem.java b/src/graiph-driver/java/org/neo4j/driver/internal/types/InternalTypeSystem.java deleted file mode 100644 index 765a8484..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/types/InternalTypeSystem.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.types; - -import org.neo4j.driver.Value; -import org.neo4j.driver.types.Type; -import org.neo4j.driver.types.TypeSystem; - -import static org.neo4j.driver.internal.types.TypeConstructor.ANY; -import static org.neo4j.driver.internal.types.TypeConstructor.BOOLEAN; -import static org.neo4j.driver.internal.types.TypeConstructor.BYTES; -import static org.neo4j.driver.internal.types.TypeConstructor.DATE; -import static org.neo4j.driver.internal.types.TypeConstructor.DATE_TIME; -import static org.neo4j.driver.internal.types.TypeConstructor.DURATION; -import static org.neo4j.driver.internal.types.TypeConstructor.FLOAT; -import static org.neo4j.driver.internal.types.TypeConstructor.INTEGER; -import static org.neo4j.driver.internal.types.TypeConstructor.LIST; -import static org.neo4j.driver.internal.types.TypeConstructor.LOCAL_DATE_TIME; -import static org.neo4j.driver.internal.types.TypeConstructor.LOCAL_TIME; -import static org.neo4j.driver.internal.types.TypeConstructor.MAP; -import static org.neo4j.driver.internal.types.TypeConstructor.NODE; -import static org.neo4j.driver.internal.types.TypeConstructor.NULL; -import static org.neo4j.driver.internal.types.TypeConstructor.NUMBER; -import static org.neo4j.driver.internal.types.TypeConstructor.PATH; -import static org.neo4j.driver.internal.types.TypeConstructor.POINT; -import static org.neo4j.driver.internal.types.TypeConstructor.RELATIONSHIP; -import static org.neo4j.driver.internal.types.TypeConstructor.STRING; -import static org.neo4j.driver.internal.types.TypeConstructor.TIME; - -/** - * Utility class for determining and working with the Cypher types of values - * - * @see Value - * @see Type - */ -public class InternalTypeSystem implements TypeSystem -{ - public static InternalTypeSystem TYPE_SYSTEM = new InternalTypeSystem(); - - private final TypeRepresentation anyType = constructType( ANY ); - private final TypeRepresentation booleanType = constructType( BOOLEAN ); - private final TypeRepresentation bytesType = constructType( BYTES ); - private final TypeRepresentation stringType = constructType( STRING ); - private final TypeRepresentation numberType = constructType( NUMBER ); - private final TypeRepresentation integerType = constructType( INTEGER ); - private final TypeRepresentation floatType = constructType( FLOAT ); - private final TypeRepresentation listType = constructType( LIST ); - private final TypeRepresentation mapType = constructType( MAP ); - private final TypeRepresentation nodeType = constructType( NODE ); - private final TypeRepresentation relationshipType = constructType( RELATIONSHIP ); - private final TypeRepresentation pathType = constructType( PATH ); - private final TypeRepresentation pointType = constructType( POINT ); - private final TypeRepresentation dateType = constructType( DATE ); - private final TypeRepresentation timeType = constructType( TIME ); - private final TypeRepresentation localTimeType = constructType( LOCAL_TIME ); - private final TypeRepresentation localDateTimeType = constructType( LOCAL_DATE_TIME ); - private final TypeRepresentation dateTimeType = constructType( DATE_TIME ); - private final TypeRepresentation durationType = constructType( DURATION ); - private final TypeRepresentation nullType = constructType( NULL ); - - private InternalTypeSystem() - { - } - - @Override - public Type ANY() - { - return anyType; - } - - @Override - public Type BOOLEAN() - { - return booleanType; - } - - @Override - public Type BYTES() - { - return bytesType; - } - - 
@Override - public Type STRING() - { - return stringType; - } - - @Override - public Type NUMBER() - { - return numberType; - } - - @Override - public Type INTEGER() - { - return integerType; - } - - @Override - public Type FLOAT() - { - return floatType; - } - - @Override - public Type LIST() - { - return listType; - } - - @Override - public Type MAP() - { - return mapType; - } - - @Override - public Type NODE() - { - return nodeType; - } - - @Override - public Type RELATIONSHIP() - { - return relationshipType; - } - - @Override - public Type PATH() - { - return pathType; - } - - @Override - public Type POINT() - { - return pointType; - } - - @Override - public Type DATE() - { - return dateType; - } - - @Override - public Type TIME() - { - return timeType; - } - - @Override - public Type LOCAL_TIME() - { - return localTimeType; - } - - @Override - public Type LOCAL_DATE_TIME() - { - return localDateTimeType; - } - - @Override - public Type DATE_TIME() - { - return dateTimeType; - } - - @Override - public Type DURATION() - { - return durationType; - } - - @Override - public Type NULL() - { - return nullType; - } - - private TypeRepresentation constructType( TypeConstructor tyCon ) - { - return new TypeRepresentation( tyCon ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/types/TypeRepresentation.java b/src/graiph-driver/java/org/neo4j/driver/internal/types/TypeRepresentation.java deleted file mode 100644 index 0d8f3cdc..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/types/TypeRepresentation.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.types; - -import org.neo4j.driver.Value; -import org.neo4j.driver.types.Type; - -import static org.neo4j.driver.internal.types.TypeConstructor.LIST; - -public class TypeRepresentation implements Type -{ - private final TypeConstructor tyCon; - - public TypeRepresentation( TypeConstructor tyCon ) - { - this.tyCon = tyCon; - } - - @Override - public boolean isTypeOf( Value value ) - { - return tyCon.covers( value ); - } - - @Override - public String name() - { - if ( tyCon == LIST ) - { - return "LIST OF ANY?"; - } - - return tyCon.toString(); - } - - public TypeConstructor constructor() - { - return tyCon; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { return true; } - if ( o == null || getClass() != o.getClass() ) - { return false; } - - TypeRepresentation that = (TypeRepresentation) o; - - return tyCon == that.tyCon; - } - - @Override - public int hashCode() - { - return tyCon.hashCode(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/CertificateTool.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/CertificateTool.java deleted file mode 100644 index fa3898fd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/CertificateTool.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -import java.io.BufferedInputStream; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.cert.Certificate; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; -import java.util.Base64; - -/** - * A tool related to save, load certs, etc. - */ -public class CertificateTool -{ - private static final String BEGIN_CERT = "-----BEGIN CERTIFICATE-----"; - private static final String END_CERT = "-----END CERTIFICATE-----"; - - /** - * Save a certificate to a file in base 64 binary format with BEGIN and END strings - * @param certStr - * @param certFile - * @throws IOException - */ - public static void saveX509Cert( String certStr, File certFile ) throws IOException - { - try ( BufferedWriter writer = new BufferedWriter( new FileWriter( certFile ) ) ) - { - writer.write( BEGIN_CERT ); - writer.newLine(); - - writer.write( certStr ); - writer.newLine(); - - writer.write( END_CERT ); - writer.newLine(); - } - } - - /** - * Save a certificate to a file. Remove all the content in the file if there is any before. 
- * - * @param cert - * @param certFile - * @throws GeneralSecurityException - * @throws IOException - */ - public static void saveX509Cert( Certificate cert, File certFile ) throws GeneralSecurityException, IOException - { - saveX509Cert( new Certificate[]{cert}, certFile ); - } - - /** - * Save a list of certificates into a file - * - * @param certs - * @param certFile - * @throws GeneralSecurityException - * @throws IOException - */ - public static void saveX509Cert( Certificate[] certs, File certFile ) throws GeneralSecurityException, IOException - { - try ( BufferedWriter writer = new BufferedWriter( new FileWriter( certFile ) ) ) - { - for ( Certificate cert : certs ) - { - String certStr = Base64.getEncoder().encodeToString( cert.getEncoded() ).replaceAll( "(.{64})", "$1\n" ); - - writer.write( BEGIN_CERT ); - writer.newLine(); - - writer.write( certStr ); - writer.newLine(); - - writer.write( END_CERT ); - writer.newLine(); - } - } - } - - /** - * Load the certificates written in X.509 format in a file to a key store. - * - * @param certFile - * @param keyStore - * @throws GeneralSecurityException - * @throws IOException - */ - public static void loadX509Cert( File certFile, KeyStore keyStore ) throws GeneralSecurityException, IOException - { - try ( BufferedInputStream inputStream = new BufferedInputStream( new FileInputStream( certFile ) ) ) - { - CertificateFactory certFactory = CertificateFactory.getInstance( "X.509" ); - - int certCount = 0; // The file might contain multiple certs - while ( inputStream.available() > 0 ) - { - try - { - Certificate cert = certFactory.generateCertificate( inputStream ); - certCount++; - loadX509Cert( cert, "neo4j.javadriver.trustedcert." + certCount, keyStore ); - } - catch ( CertificateException e ) - { - if ( e.getCause() != null && e.getCause().getMessage().equals( "Empty input" ) ) - { - // This happens if there is whitespace at the end of the certificate - we load one cert, and then try and load a - // second cert, at which point we fail - return; - } - throw new IOException( "Failed to load certificate from `" + certFile.getAbsolutePath() + "`: " + certCount + " : " + e.getMessage(), e ); - } - } - } - } - - /** - * Load a certificate to a key store with a name - * - * @param certAlias a name to identify different certificates - * @param cert - * @param keyStore - */ - public static void loadX509Cert( Certificate cert, String certAlias, KeyStore keyStore ) throws KeyStoreException - { - keyStore.setCertificateEntry( certAlias, cert ); - } - - /** - * Convert a certificate in base 64 binary format with BEGIN and END strings - * @param cert encoded cert string - * @return - */ - public static String X509CertToString( String cert ) - { - String cert64CharPerLine = cert.replaceAll( "(.{64})", "$1\n" ); - return BEGIN_CERT + "\n" + cert64CharPerLine + "\n"+ END_CERT + "\n"; - } -} - - - diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/Clock.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/Clock.java deleted file mode 100644 index 46a58988..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/Clock.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -/** - * Since {@link java.time.Clock} is only available in Java 8, use our own until we drop java 7 support. - */ -public interface Clock -{ - /** Current time, in milliseconds. */ - long millis(); - - void sleep( long millis ) throws InterruptedException; - - Clock SYSTEM = new Clock() - { - @Override - public long millis() - { - return System.currentTimeMillis(); - } - - @Override - public void sleep( long millis ) throws InterruptedException - { - Thread.sleep( millis ); - } - }; -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/ErrorUtil.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/ErrorUtil.java deleted file mode 100644 index 43d4821d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/ErrorUtil.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -import io.netty.util.internal.PlatformDependent; - -import java.util.concurrent.ExecutionException; -import java.util.stream.Stream; - -import org.neo4j.driver.exceptions.AuthenticationException; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.exceptions.DatabaseException; -import org.neo4j.driver.exceptions.Neo4jException; -import org.neo4j.driver.exceptions.ServiceUnavailableException; -import org.neo4j.driver.exceptions.TransientException; - -public final class ErrorUtil -{ - private ErrorUtil() - { - } - - public static ServiceUnavailableException newConnectionTerminatedError( String reason ) - { - if ( reason == null ) - { - return newConnectionTerminatedError(); - } - return new ServiceUnavailableException( "Connection to the database terminated. " + reason ); - } - - public static ServiceUnavailableException newConnectionTerminatedError() - { - return new ServiceUnavailableException( "Connection to the database terminated. 
" + - "This can happen due to network instabilities, " + - "or due to restarts of the database" ); - } - - public static Neo4jException newNeo4jError( String code, String message ) - { - String classification = extractClassification( code ); - switch ( classification ) - { - case "ClientError": - if ( code.equalsIgnoreCase( "Neo.ClientError.Security.Unauthorized" ) ) - { - return new AuthenticationException( code, message ); - } - else - { - return new ClientException( code, message ); - } - case "TransientError": - return new TransientException( code, message ); - default: - return new DatabaseException( code, message ); - } - } - - public static boolean isFatal( Throwable error ) - { - if ( error instanceof Neo4jException ) - { - if ( isProtocolViolationError( ((Neo4jException) error) ) ) - { - return true; - } - - if ( isClientOrTransientError( ((Neo4jException) error) ) ) - { - return false; - } - } - return true; - } - - public static void rethrowAsyncException( ExecutionException e ) - { - Throwable error = e.getCause(); - - InternalExceptionCause internalCause = new InternalExceptionCause( error.getStackTrace() ); - error.addSuppressed( internalCause ); - - StackTraceElement[] currentStackTrace = Stream.of( Thread.currentThread().getStackTrace() ) - .skip( 2 ) // do not include Thread.currentThread() and this method in the stacktrace - .toArray( StackTraceElement[]::new ); - error.setStackTrace( currentStackTrace ); - - PlatformDependent.throwException( error ); - } - - private static boolean isProtocolViolationError( Neo4jException error ) - { - String errorCode = error.code(); - return errorCode != null && errorCode.startsWith( "Neo.ClientError.Request" ); - } - - private static boolean isClientOrTransientError( Neo4jException error ) - { - String errorCode = error.code(); - return errorCode != null && (errorCode.contains( "ClientError" ) || errorCode.contains( "TransientError" )); - } - - private static String extractClassification( String code ) - { - String[] parts = code.split( "\\." ); - if ( parts.length < 2 ) - { - return ""; - } - return parts[1]; - } - - /** - * Exception which is merely a holder of an async stacktrace, which is not the primary stacktrace users are interested in. - * Used for blocking API calls that block on async API calls. - */ - private static class InternalExceptionCause extends RuntimeException - { - InternalExceptionCause( StackTraceElement[] stackTrace ) - { - setStackTrace( stackTrace ); - } - - @Override - public synchronized Throwable fillInStackTrace() - { - // no need to fill in the stack trace - // this exception just uses the given stack trace - return this; - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/Extract.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/Extract.java deleted file mode 100644 index 840544fa..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/Extract.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.internal.InternalPair; -import org.neo4j.driver.internal.value.NodeValue; -import org.neo4j.driver.internal.value.PathValue; -import org.neo4j.driver.internal.value.RelationshipValue; -import org.neo4j.driver.Record; -import org.neo4j.driver.Value; -import org.neo4j.driver.exceptions.ClientException; -import org.neo4j.driver.types.MapAccessor; -import org.neo4j.driver.types.Node; -import org.neo4j.driver.types.Path; -import org.neo4j.driver.types.Relationship; -import java.util.function.Function; -import org.neo4j.driver.util.Pair; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static java.util.Collections.unmodifiableList; -import static java.util.Collections.unmodifiableMap; -import static org.neo4j.driver.internal.util.Iterables.newHashMapWithSize; -import static org.neo4j.driver.Values.value; - -/** - * Utility class for extracting data. - */ -public final class Extract -{ - private Extract() - { - throw new UnsupportedOperationException(); - } - - public static List list( Value[] values ) - { - switch ( values.length ) - { - case 0: - return emptyList(); - case 1: - return singletonList( values[0] ); - default: - return unmodifiableList( Arrays.asList( values ) ); - } - } - - public static List list( Value[] data, Function mapFunction ) - { - int size = data.length; - switch ( size ) - { - case 0: - return emptyList(); - case 1: - return singletonList( mapFunction.apply( data[0] ) ); - default: - List result = new ArrayList<>( size ); - for ( Value value : data ) - { - result.add( mapFunction.apply( value ) ); - } - return unmodifiableList( result ); - } - } - - public static Map map( Map data, Function mapFunction ) - { - if ( data.isEmpty() ) { - return emptyMap(); - } else { - int size = data.size(); - if ( size == 1 ) { - Map.Entry head = data.entrySet().iterator().next(); - return singletonMap( head.getKey(), mapFunction.apply( head.getValue() ) ); - } else { - Map map = Iterables.newLinkedHashMapWithSize( size ); - for ( Map.Entry entry : data.entrySet() ) - { - map.put( entry.getKey(), mapFunction.apply( entry.getValue() ) ); - } - return unmodifiableMap( map ); - } - } - } - - public static Map map( Record record, Function mapFunction ) - { - int size = record.size(); - switch ( size ) - { - case 0: - return emptyMap(); - - case 1: - return singletonMap( record.keys().get( 0 ), mapFunction.apply( record.get( 0 ) ) ); - - default: - Map map = Iterables.newLinkedHashMapWithSize( size ); - List keys = record.keys(); - for ( int i = 0; i < size; i++ ) - { - map.put( keys.get( i ), mapFunction.apply( record.get( i ) ) ); - } - return unmodifiableMap( map ); - } - } - - public static Iterable> properties( final MapAccessor map, final Function mapFunction ) - { - int size = map.size(); - switch ( size ) - { - case 0: - return emptyList(); - - case 1: - { - String key = map.keys().iterator().next(); - Value value = map.get( key ); - return singletonList( InternalPair.of( key, mapFunction.apply( value ) ) ); - } - - default: - { - List> list = new ArrayList<>( size ); - for ( String key : map.keys() ) - { - Value value = map.get( key ); - list.add( 
InternalPair.of( key, mapFunction.apply( value ) ) ); - } - return unmodifiableList( list ); - } - } - } - - public static List> fields( final Record map, final Function mapFunction ) - { - int size = map.keys().size(); - switch ( size ) - { - case 0: - return emptyList(); - - case 1: - { - String key = map.keys().iterator().next(); - Value value = map.get( key ); - return singletonList( InternalPair.of( key, mapFunction.apply( value ) ) ); - } - - default: - { - List> list = new ArrayList<>( size ); - List keys = map.keys(); - for ( int i = 0; i < size; i++ ) - { - String key = keys.get( i ); - Value value = map.get( i ); - list.add( InternalPair.of( key, mapFunction.apply( value ) ) ); - } - return unmodifiableList( list ); - } - } - } - - public static Map mapOfValues( Map map ) - { - if ( map == null || map.isEmpty() ) - { - return emptyMap(); - } - - Map result = newHashMapWithSize( map.size() ); - for ( Map.Entry entry : map.entrySet() ) - { - Object value = entry.getValue(); - assertParameter( value ); - result.put( entry.getKey(), value( value ) ); - } - return result; - } - - public static void assertParameter( Object value ) - { - if ( value instanceof Node || value instanceof NodeValue ) - { - throw new ClientException( "Nodes can't be used as parameters." ); - } - if ( value instanceof Relationship || value instanceof RelationshipValue ) - { - throw new ClientException( "Relationships can't be used as parameters." ); - } - if ( value instanceof Path || value instanceof PathValue ) - { - throw new ClientException( "Paths can't be used as parameters." ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/Format.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/Format.java deleted file mode 100644 index 1421245f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/Format.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.util; - -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; - -public abstract class Format -{ - private Format() - { - throw new UnsupportedOperationException(); - } - - // formats map using ':' as key-value separator instead of default '=' - public static String formatPairs( Map entries ) - { - Iterator> iterator = entries.entrySet().iterator(); - switch ( entries.size() ) { - case 0: - return "{}"; - - case 1: - { - return String.format( "{%s}", keyValueString( iterator.next() ) ); - } - - default: - { - StringBuilder builder = new StringBuilder(); - builder.append( "{" ); - builder.append( keyValueString( iterator.next() ) ); - while ( iterator.hasNext() ) - { - builder.append( ',' ); - builder.append( ' ' ); - builder.append( keyValueString( iterator.next() ) ); - } - builder.append( "}" ); - return builder.toString(); - } - } - } - - private static String keyValueString( Entry entry ) - { - return String.format( "%s: %s", entry.getKey(), String.valueOf( entry.getValue() ) ); - } - - /** - * Returns the submitted value if it is not null or an empty string if it is. - */ - public static String valueOrEmpty( String value ) - { - return value != null ? value : ""; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/Futures.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/Futures.java deleted file mode 100644 index e0a50d4a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/Futures.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.util; - -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; - -import org.neo4j.driver.internal.async.connection.EventLoopGroupFactory; - -import static java.util.concurrent.CompletableFuture.completedFuture; - -public final class Futures -{ - private static final CompletableFuture COMPLETED_WITH_NULL = completedFuture( null ); - - private Futures() - { - } - - @SuppressWarnings( "unchecked" ) - public static CompletableFuture completedWithNull() - { - return (CompletableFuture) COMPLETED_WITH_NULL; - } - - public static CompletionStage asCompletionStage( io.netty.util.concurrent.Future future ) - { - CompletableFuture result = new CompletableFuture<>(); - if ( future.isCancelled() ) - { - result.cancel( true ); - } - else if ( future.isSuccess() ) - { - result.complete( future.getNow() ); - } - else if ( future.cause() != null ) - { - result.completeExceptionally( future.cause() ); - } - else - { - future.addListener( ignore -> - { - if ( future.isCancelled() ) - { - result.cancel( true ); - } - else if ( future.isSuccess() ) - { - result.complete( future.getNow() ); - } - else - { - result.completeExceptionally( future.cause() ); - } - } ); - } - return result; - } - - public static CompletableFuture failedFuture( Throwable error ) - { - CompletableFuture result = new CompletableFuture<>(); - result.completeExceptionally( error ); - return result; - } - - public static V blockingGet( CompletionStage stage ) - { - return blockingGet( stage, Futures::noOpInterruptHandler ); - } - - public static V blockingGet( CompletionStage stage, Runnable interruptHandler ) - { - EventLoopGroupFactory.assertNotInEventLoopThread(); - - Future future = stage.toCompletableFuture(); - boolean interrupted = false; - try - { - while ( true ) - { - try - { - return future.get(); - } - catch ( InterruptedException e ) - { - // this thread was interrupted while waiting - // computation denoted by the future might still be running - - interrupted = true; - - // run the interrupt handler and ignore if it throws - // need to wait for IO thread to actually finish, can't simply re-rethrow - safeRun( interruptHandler ); - } - catch ( ExecutionException e ) - { - ErrorUtil.rethrowAsyncException( e ); - } - } - } - finally - { - if ( interrupted ) - { - Thread.currentThread().interrupt(); - } - } - } - - public static T getNow( CompletionStage stage ) - { - return stage.toCompletableFuture().getNow( null ); - } - - /** - * Helper method to extract cause of a {@link CompletionException}. - *
- * <p>
- * When using {@link CompletionStage#whenComplete(BiConsumer)} and {@link CompletionStage#handle(BiFunction)} - * propagated exceptions might get wrapped in a {@link CompletionException}. - * - * @param error the exception to get cause for. - * @return cause of the given exception if it is a {@link CompletionException}, given exception otherwise. - */ - public static Throwable completionExceptionCause( Throwable error ) - { - if ( error instanceof CompletionException ) - { - return error.getCause(); - } - return error; - } - - /** - * Helped method to turn given exception into a {@link CompletionException}. - * - * @param error the exception to convert. - * @return given exception wrapped with {@link CompletionException} if it's not one already. - */ - public static CompletionException asCompletionException( Throwable error ) - { - if ( error instanceof CompletionException ) - { - return ((CompletionException) error); - } - return new CompletionException( error ); - } - - /** - * Combine given errors into a single {@link CompletionException} to be rethrown from inside a - * {@link CompletionStage} chain. - * - * @param error1 the first error or {@code null}. - * @param error2 the second error or {@code null}. - * @return {@code null} if both errors are null, {@link CompletionException} otherwise. - */ - public static CompletionException combineErrors( Throwable error1, Throwable error2 ) - { - if ( error1 != null && error2 != null ) - { - Throwable cause1 = completionExceptionCause( error1 ); - Throwable cause2 = completionExceptionCause( error2 ); - if ( cause1 != cause2 ) - { - cause1.addSuppressed( cause2 ); - } - return asCompletionException( cause1 ); - } - else if ( error1 != null ) - { - return asCompletionException( error1 ); - } - else if ( error2 != null ) - { - return asCompletionException( error2 ); - } - else - { - return null; - } - } - - private static void safeRun( Runnable runnable ) - { - try - { - runnable.run(); - } - catch ( Throwable ignore ) - { - } - } - - private static void noOpInterruptHandler() - { - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/Iterables.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/Iterables.java deleted file mode 100644 index 3751db93..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/Iterables.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.util; - -import java.util.AbstractQueue; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Queue; - -import java.util.function.Function; - -public class Iterables -{ - @SuppressWarnings( "rawtypes" ) - private static final Queue EMPTY_QUEUE = new EmptyQueue(); - private static final float DEFAULT_HASH_MAP_LOAD_FACTOR = 0.75F; - - public static int count( Iterable it ) - { - if ( it instanceof Collection ) { return ((Collection) it).size(); } - int size = 0; - for ( Object o : it ) - { - size++; - } - return size; - } - - public static List asList( Iterable it ) - { - if ( it instanceof List ) { return (List) it; } - List list = new ArrayList<>(); - for ( T t : it ) - { - list.add( t ); - } - return list; - } - - public static T single( Iterable it ) - { - Iterator iterator = it.iterator(); - if ( !iterator.hasNext() ) - { - throw new IllegalArgumentException( "Given iterable is empty" ); - } - T result = iterator.next(); - if ( iterator.hasNext() ) - { - throw new IllegalArgumentException( "Given iterable contains more than one element: " + it ); - } - return result; - } - - public static Map map( String ... alternatingKeyValue ) - { - Map out = newHashMapWithSize( alternatingKeyValue.length / 2 ); - for ( int i = 0; i < alternatingKeyValue.length; i+=2 ) - { - out.put( alternatingKeyValue[i], alternatingKeyValue[i+1] ); - } - return out; - } - - public static Iterable map(final Iterable it, final Function f) - { - return new Iterable() - { - @Override - public Iterator iterator() - { - final Iterator aIterator = it.iterator(); - return new Iterator() - { - @Override - public boolean hasNext() - { - return aIterator.hasNext(); - } - - @Override - public B next() - { - return f.apply( aIterator.next() ); - } - - @Override - public void remove() - { - aIterator.remove(); - } - }; - } - }; - } - - @SuppressWarnings( "unchecked" ) - public static Queue emptyQueue() - { - return (Queue) EMPTY_QUEUE; - } - - public static HashMap newHashMapWithSize( int expectedSize ) - { - return new HashMap<>( hashMapCapacity( expectedSize ) ); - } - - public static LinkedHashMap newLinkedHashMapWithSize( int expectedSize ) - { - return new LinkedHashMap<>( hashMapCapacity( expectedSize ) ); - } - - private static int hashMapCapacity( int expectedSize ) - { - if ( expectedSize < 3 ) - { - if ( expectedSize < 0 ) - { - throw new IllegalArgumentException( "Illegal map size: " + expectedSize ); - } - return expectedSize + 1; - } - return (int) ((float) expectedSize / DEFAULT_HASH_MAP_LOAD_FACTOR + 1.0F); - } - - private static class EmptyQueue extends AbstractQueue - { - @Override - public Iterator iterator() - { - return Collections.emptyIterator(); - } - - @Override - public int size() - { - return 0; - } - - @Override - public boolean offer( T t ) - { - throw new UnsupportedOperationException(); - } - - @Override - public T poll() - { - return null; - } - - @Override - public T peek() - { - return null; - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/MetadataExtractor.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/MetadataExtractor.java deleted file mode 100644 index d39b7192..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/MetadataExtractor.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," 
- * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.internal.Bookmarks; -import org.neo4j.driver.internal.spi.Connection; -import org.neo4j.driver.internal.summary.InternalNotification; -import org.neo4j.driver.internal.summary.InternalPlan; -import org.neo4j.driver.internal.summary.InternalProfiledPlan; -import org.neo4j.driver.internal.summary.InternalResultSummary; -import org.neo4j.driver.internal.summary.InternalServerInfo; -import org.neo4j.driver.internal.summary.InternalSummaryCounters; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.exceptions.UntrustedServerException; -import org.neo4j.driver.summary.Notification; -import org.neo4j.driver.summary.Plan; -import org.neo4j.driver.summary.ProfiledPlan; -import org.neo4j.driver.summary.ResultSummary; -import org.neo4j.driver.summary.ServerInfo; -import org.neo4j.driver.summary.StatementType; - -import static java.util.Collections.emptyList; -import static org.neo4j.driver.internal.types.InternalTypeSystem.TYPE_SYSTEM; - -public class MetadataExtractor -{ - public static final int ABSENT_QUERY_ID = -1; - private final String resultAvailableAfterMetadataKey; - private final String resultConsumedAfterMetadataKey; - - public MetadataExtractor( String resultAvailableAfterMetadataKey, String resultConsumedAfterMetadataKey ) - { - this.resultAvailableAfterMetadataKey = resultAvailableAfterMetadataKey; - this.resultConsumedAfterMetadataKey = resultConsumedAfterMetadataKey; - } - - public List extractStatementKeys( Map metadata ) - { - Value keysValue = metadata.get( "fields" ); - if ( keysValue != null ) - { - if ( !keysValue.isEmpty() ) - { - List keys = new ArrayList<>( keysValue.size() ); - for ( Value value : keysValue.values() ) - { - keys.add( value.asString() ); - } - - return keys; - } - } - return emptyList(); - } - - public long extractQueryId( Map metadata ) - { - Value statementId = metadata.get( "qid" ); - if ( statementId != null ) - { - return statementId.asLong(); - } - return ABSENT_QUERY_ID; - } - - - public long extractResultAvailableAfter( Map metadata ) - { - Value resultAvailableAfterValue = metadata.get( resultAvailableAfterMetadataKey ); - if ( resultAvailableAfterValue != null ) - { - return resultAvailableAfterValue.asLong(); - } - return -1; - } - - public ResultSummary extractSummary( Statement statement, Connection connection, long resultAvailableAfter, Map metadata ) - { - ServerInfo serverInfo = new InternalServerInfo( connection.serverAddress(), connection.serverVersion() ); - return new InternalResultSummary( statement, serverInfo, extractStatementType( metadata ), - extractCounters( metadata ), extractPlan( metadata ), extractProfiledPlan( metadata ), - extractNotifications( metadata ), resultAvailableAfter, 
extractResultConsumedAfter( metadata, resultConsumedAfterMetadataKey ) ); - } - - public Bookmarks extractBookmarks( Map metadata ) - { - Value bookmarkValue = metadata.get( "bookmark" ); - if ( bookmarkValue != null && !bookmarkValue.isNull() && bookmarkValue.hasType( TYPE_SYSTEM.STRING() ) ) - { - return Bookmarks.from( bookmarkValue.asString() ); - } - return Bookmarks.empty(); - } - - public static ServerVersion extractNeo4jServerVersion( Map metadata ) - { - Value versionValue = metadata.get( "server" ); - if ( versionValue == null || versionValue.isNull() ) - { - throw new UntrustedServerException( "Server provides no product identifier" ); - } - else - { - ServerVersion server = ServerVersion.version( versionValue.asString() ); - if ( ServerVersion.NEO4J_PRODUCT.equalsIgnoreCase( server.product() ) ) - { - return server; - } - else - { - throw new UntrustedServerException( "Server does not identify as a genuine Neo4j instance: '" + server.product() + "'" ); - } - } - } - - private static StatementType extractStatementType( Map metadata ) - { - Value typeValue = metadata.get( "type" ); - if ( typeValue != null ) - { - return StatementType.fromCode( typeValue.asString() ); - } - return null; - } - - private static InternalSummaryCounters extractCounters( Map metadata ) - { - Value countersValue = metadata.get( "stats" ); - if ( countersValue != null ) - { - return new InternalSummaryCounters( - counterValue( countersValue, "nodes-created" ), - counterValue( countersValue, "nodes-deleted" ), - counterValue( countersValue, "relationships-created" ), - counterValue( countersValue, "relationships-deleted" ), - counterValue( countersValue, "properties-set" ), - counterValue( countersValue, "labels-added" ), - counterValue( countersValue, "labels-removed" ), - counterValue( countersValue, "indexes-added" ), - counterValue( countersValue, "indexes-removed" ), - counterValue( countersValue, "constraints-added" ), - counterValue( countersValue, "constraints-removed" ) - ); - } - return null; - } - - private static int counterValue( Value countersValue, String name ) - { - Value value = countersValue.get( name ); - return value.isNull() ? 
0 : value.asInt(); - } - - private static Plan extractPlan( Map metadata ) - { - Value planValue = metadata.get( "plan" ); - if ( planValue != null ) - { - return InternalPlan.EXPLAIN_PLAN_FROM_VALUE.apply( planValue ); - } - return null; - } - - private static ProfiledPlan extractProfiledPlan( Map metadata ) - { - Value profiledPlanValue = metadata.get( "profile" ); - if ( profiledPlanValue != null ) - { - return InternalProfiledPlan.PROFILED_PLAN_FROM_VALUE.apply( profiledPlanValue ); - } - return null; - } - - private static List extractNotifications( Map metadata ) - { - Value notificationsValue = metadata.get( "notifications" ); - if ( notificationsValue != null ) - { - return notificationsValue.asList( InternalNotification.VALUE_TO_NOTIFICATION ); - } - return Collections.emptyList(); - } - - private static long extractResultConsumedAfter( Map metadata, String key ) - { - Value resultConsumedAfterValue = metadata.get( key ); - if ( resultConsumedAfterValue != null ) - { - return resultConsumedAfterValue.asLong(); - } - return -1; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/Preconditions.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/Preconditions.java deleted file mode 100644 index bd25c492..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/Preconditions.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -public final class Preconditions -{ - private Preconditions() - { - } - - /** - * Assert that given expression is true. - * - * @param expression the value to check. - * @param message the message. - * @throws IllegalArgumentException if given value is {@code false}. - */ - public static void checkArgument( boolean expression, String message ) - { - if ( !expression ) - { - throw new IllegalArgumentException( message ); - } - } - - /** - * Assert that given argument is of expected type. - * - * @param argument the object to check. - * @param expectedClass the expected type. - * @throws IllegalArgumentException if argument is not of expected type. - */ - public static void checkArgument( Object argument, Class expectedClass ) - { - if ( !expectedClass.isInstance( argument ) ) - { - throw new IllegalArgumentException( "Argument expected to be of type: " + expectedClass.getName() + " but was: " + argument ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/util/ServerVersion.java b/src/graiph-driver/java/org/neo4j/driver/internal/util/ServerVersion.java deleted file mode 100644 index 244bfe0d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/util/ServerVersion.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.util; - -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.neo4j.driver.Driver; -import org.neo4j.driver.Session; - -import static java.lang.Integer.compare; - -public class ServerVersion -{ - public static final String NEO4J_PRODUCT = "Neo4j"; - - public static final ServerVersion v4_0_0 = new ServerVersion( NEO4J_PRODUCT, 4, 0, 0 ); - public static final ServerVersion v3_5_0 = new ServerVersion( NEO4J_PRODUCT, 3, 5, 0 ); - public static final ServerVersion v3_4_0 = new ServerVersion( NEO4J_PRODUCT, 3, 4, 0 ); - public static final ServerVersion v3_2_0 = new ServerVersion( NEO4J_PRODUCT, 3, 2, 0 ); - public static final ServerVersion v3_1_0 = new ServerVersion( NEO4J_PRODUCT, 3, 1, 0 ); - public static final ServerVersion v3_0_0 = new ServerVersion( NEO4J_PRODUCT, 3, 0, 0 ); - public static final ServerVersion vInDev = new ServerVersion( NEO4J_PRODUCT, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE ); - - private static final String NEO4J_IN_DEV_VERSION_STRING = NEO4J_PRODUCT + "/dev"; - private static final Pattern PATTERN = - Pattern.compile( "([^/]+)/(\\d+)\\.(\\d+)(?:\\.)?(\\d*)(\\.|-|\\+)?([0-9A-Za-z-.]*)?" 
); - - private final String product; - private final int major; - private final int minor; - private final int patch; - private final String stringValue; - - private ServerVersion( String product, int major, int minor, int patch ) - { - this.product = product; - this.major = major; - this.minor = minor; - this.patch = patch; - this.stringValue = stringValue( product, major, minor, patch ); - } - - public String product() - { - return product; - } - - public static ServerVersion version( Driver driver ) - { - try ( Session session = driver.session() ) - { - String versionString = session.readTransaction( tx -> tx.run( "RETURN 1" ).consume().server().version() ); - return version( versionString ); - } - } - - public static ServerVersion version( String server ) - { - Matcher matcher = PATTERN.matcher( server ); - if ( matcher.matches() ) - { - String product = matcher.group( 1 ); - int major = Integer.valueOf( matcher.group( 2 ) ); - int minor = Integer.valueOf( matcher.group( 3 ) ); - String patchString = matcher.group( 4 ); - int patch = 0; - if ( patchString != null && !patchString.isEmpty() ) - { - patch = Integer.valueOf( patchString ); - } - return new ServerVersion( product, major, minor, patch ); - } - else if ( server.equalsIgnoreCase( NEO4J_IN_DEV_VERSION_STRING ) ) - { - return vInDev; - } - else - { - throw new IllegalArgumentException( "Cannot parse " + server ); - } - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { return true; } - if ( o == null || getClass() != o.getClass() ) - { return false; } - - ServerVersion that = (ServerVersion) o; - - if ( !product.equals( that.product ) ) - { return false; } - if ( major != that.major ) - { return false; } - if ( minor != that.minor ) - { return false; } - return patch == that.patch; - } - - @Override - public int hashCode() - { - return Objects.hash(product, major, minor, patch); - } - - public boolean greaterThan(ServerVersion other) - { - return compareTo( other ) > 0; - } - - public boolean greaterThanOrEqual(ServerVersion other) - { - return compareTo( other ) >= 0; - } - - public boolean lessThan(ServerVersion other) - { - return compareTo( other ) < 0; - } - - public boolean lessThanOrEqual(ServerVersion other) - { - return compareTo( other ) <= 0; - } - - private int compareTo( ServerVersion o ) - { - if ( !product.equals( o.product ) ) - { - throw new IllegalArgumentException( "Comparing different products '" + product + "' with '" + o.product + "'" ); - } - int c = compare( major, o.major ); - if (c == 0) - { - c = compare( minor, o.minor ); - if (c == 0) - { - c = compare( patch, o.patch ); - } - } - - return c; - } - - @Override - public String toString() - { - return stringValue; - } - - private static String stringValue( String product, int major, int minor, int patch ) - { - if ( major == Integer.MAX_VALUE && minor == Integer.MAX_VALUE && patch == Integer.MAX_VALUE ) - { - return NEO4J_IN_DEV_VERSION_STRING; - } - return String.format( "%s/%s.%s.%s", product, major, minor, patch ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/BooleanValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/BooleanValue.java deleted file mode 100644 index 050ea13e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/BooleanValue.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public abstract class BooleanValue extends ValueAdapter -{ - private BooleanValue() - { - //do nothing - } - - public static BooleanValue TRUE = new TrueValue(); - public static BooleanValue FALSE = new FalseValue(); - - public static BooleanValue fromBoolean( boolean value ) - { - return value ? TRUE : FALSE; - } - - @Override - public abstract Boolean asObject(); - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.BOOLEAN(); - } - - @Override - public int hashCode() - { - Boolean value = asBoolean() ? Boolean.TRUE : Boolean.FALSE; - return value.hashCode(); - } - - private static class TrueValue extends BooleanValue { - - @Override - public Boolean asObject() - { - return Boolean.TRUE; - } - - @Override - public boolean asBoolean() - { - return true; - } - - @Override - public boolean isTrue() - { - return true; - } - - @Override - public boolean isFalse() - { - return false; - } - - @SuppressWarnings("EqualsWhichDoesntCheckParameterClass") - @Override - public boolean equals( Object obj ) - { - return obj == TRUE; - } - - @Override - public String toString() - { - return "TRUE"; - } - } - - private static class FalseValue extends BooleanValue - { - @Override - public Boolean asObject() - { - return Boolean.FALSE; - } - - @Override - public boolean asBoolean() - { - return false; - } - - @Override - public boolean isTrue() - { - return false; - } - - @Override - public boolean isFalse() - { - return true; - } - - @SuppressWarnings("EqualsWhichDoesntCheckParameterClass") - @Override - public boolean equals( Object obj ) - { - return obj == FALSE; - } - - @Override - public String toString() - { - return "FALSE"; - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/BytesValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/BytesValue.java deleted file mode 100644 index eef5ca1b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/BytesValue.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import java.util.Arrays; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class BytesValue extends ValueAdapter -{ - private final byte[] val; - - public BytesValue( byte[] val ) - { - if ( val == null ) - { - throw new IllegalArgumentException( "Cannot construct BytesValue from null" ); - } - this.val = val; - } - - @Override - public boolean isEmpty() - { - return val.length == 0; - } - - @Override - public int size() - { - return val.length; - } - - @Override - public byte[] asObject() - { - return val; - } - - @Override - public byte[] asByteArray() - { - return val; - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.BYTES(); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - BytesValue values = (BytesValue) o; - return Arrays.equals(val, values.val); - } - - @Override - public int hashCode() - { - return Arrays.hashCode(val); - } - - @Override - public String toString() - { - StringBuilder s = new StringBuilder("#"); - for (byte b : val) - { - if (b < 0x10) - { - s.append('0'); - } - s.append(Integer.toHexString(b)); - } - return s.toString(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/DateTimeValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/DateTimeValue.java deleted file mode 100644 index a645dde9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/DateTimeValue.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.time.OffsetDateTime; -import java.time.ZonedDateTime; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class DateTimeValue extends ObjectValueAdapter -{ - public DateTimeValue( ZonedDateTime zonedDateTime ) - { - super( zonedDateTime ); - } - - @Override - public OffsetDateTime asOffsetDateTime() - { - return asZonedDateTime().toOffsetDateTime(); - } - - @Override - public ZonedDateTime asZonedDateTime() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.DATE_TIME(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/DateValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/DateValue.java deleted file mode 100644 index df122379..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/DateValue.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.time.LocalDate; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class DateValue extends ObjectValueAdapter -{ - public DateValue( LocalDate date ) - { - super( date ); - } - - @Override - public LocalDate asLocalDate() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.DATE(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/DurationValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/DurationValue.java deleted file mode 100644 index 97249667..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/DurationValue.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.IsoDuration; -import org.neo4j.driver.types.Type; - -public class DurationValue extends ObjectValueAdapter -{ - public DurationValue( IsoDuration duration ) - { - super( duration ); - } - - @Override - public IsoDuration asIsoDuration() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.DURATION(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/EntityValueAdapter.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/EntityValueAdapter.java deleted file mode 100644 index 36129d60..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/EntityValueAdapter.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.util.Map; - -import org.neo4j.driver.Value; -import org.neo4j.driver.types.Entity; -import java.util.function.Function; - -public abstract class EntityValueAdapter extends ObjectValueAdapter -{ - protected EntityValueAdapter( V adapted ) - { - super( adapted ); - } - - @Override - public V asEntity() - { - return asObject(); - } - - @Override - public Map asMap() - { - return asEntity().asMap(); - } - - @Override - public Map asMap( Function mapFunction ) - { - return asEntity().asMap( mapFunction ); - } - - @Override - public int size() - { - return asEntity().size(); - } - - @Override - public Iterable keys() - { - return asEntity().keys(); - } - - @Override - public Value get( String key ) - { - return asEntity().get( key ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/FloatValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/FloatValue.java deleted file mode 100644 index 38c25256..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/FloatValue.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.exceptions.value.LossyCoercion; -import org.neo4j.driver.types.Type; - -public class FloatValue extends NumberValueAdapter -{ - private final double val; - - public FloatValue( double val ) - { - this.val = val; - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.FLOAT(); - } - - @Override - public Double asNumber() - { - return val; - } - - @Override - public long asLong() - { - long longVal = (long) val; - if ((double) longVal != val) - { - throw new LossyCoercion( type().name(), "Java long" ); - } - - return longVal; - } - - @Override - public int asInt() - { - int intVal = (int) val; - if ((double) intVal != val) - { - throw new LossyCoercion( type().name(), "Java int" ); - } - - return intVal; - } - - @Override - public double asDouble() - { - return val; - } - - @Override - public float asFloat() - { - float floatVal = (float) val; - if ((double) floatVal != val) - { - throw new LossyCoercion( type().name(), "Java float" ); - } - - return floatVal; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - FloatValue values = (FloatValue) o; - return Double.compare( values.val, val ) == 0; - } - - @Override - public int hashCode() - { - long temp = Double.doubleToLongBits( val ); - return (int) (temp ^ (temp >>> 32)); - } - - @Override - public String toString() - { - return Double.toString( val ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/IntegerValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/IntegerValue.java deleted file mode 100644 index 7153e907..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/IntegerValue.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.exceptions.value.LossyCoercion; -import org.neo4j.driver.types.Type; - -public class IntegerValue extends NumberValueAdapter -{ - private final long val; - - public IntegerValue( long val ) - { - this.val = val; - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.INTEGER(); - } - - @Override - public Long asNumber() - { - return val; - } - - @Override - public long asLong() - { - return val; - } - - @Override - public int asInt() - { - if (val > Integer.MAX_VALUE || val < Integer.MIN_VALUE) - { - throw new LossyCoercion( type().name(), "Java int" ); - } - return (int) val; - } - - @Override - public double asDouble() - { - double doubleVal = (double) val; - if ( (long) doubleVal != val) - { - throw new LossyCoercion( type().name(), "Java double" ); - } - - return (double) val; - } - - @Override - public float asFloat() - { - return (float) val; - } - - @Override - public String toString() - { - return Long.toString( val ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - IntegerValue values = (IntegerValue) o; - return val == values.val; - } - - @Override - public int hashCode() - { - return (int) (val ^ (val >>> 32)); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/InternalValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/InternalValue.java deleted file mode 100644 index 72dcd7b4..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/InternalValue.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.AsValue; -import org.neo4j.driver.internal.types.TypeConstructor; -import org.neo4j.driver.Value; - -public interface InternalValue extends Value, AsValue -{ - TypeConstructor typeConstructor(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/ListValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/ListValue.java deleted file mode 100644 index 5e1764ee..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/ListValue.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.internal.util.Extract; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.types.Type; -import java.util.function.Function; - -import static org.neo4j.driver.Values.ofObject; - -public class ListValue extends ValueAdapter -{ - private final Value[] values; - - public ListValue( Value... values ) - { - if ( values == null ) - { - throw new IllegalArgumentException( "Cannot construct ListValue from null" ); - } - this.values = values; - } - - @Override - public boolean isEmpty() - { - return values.length == 0; - } - - @Override - public List asObject() - { - return asList( ofObject() ); - } - - @Override - public List asList() - { - return Extract.list( values, ofObject() ); - } - - @Override - public List asList( Function mapFunction ) - { - return Extract.list( values, mapFunction ); - } - - @Override - public int size() - { - return values.length; - } - - @Override - public Value get( int index ) - { - return index >= 0 && index < values.length ? values[index] : Values.NULL; - } - - @Override - public Iterable values( final Function mapFunction ) - { - return new Iterable() - { - @Override - public Iterator iterator() - { - return new Iterator() - { - private int cursor = 0; - - @Override - public boolean hasNext() - { - return cursor < values.length; - } - - @Override - public T next() - { - return mapFunction.apply( values[cursor++] ); - } - - @Override - public void remove() - { - } - }; - } - }; - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.LIST(); - } - - @Override - public String toString() - { - return Arrays.toString( values ); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - ListValue otherValues = (ListValue) o; - return Arrays.equals( values, otherValues.values ); - } - - @Override - public int hashCode() - { - return Arrays.hashCode( values ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/LocalDateTimeValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/LocalDateTimeValue.java deleted file mode 100644 index 1368b93b..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/LocalDateTimeValue.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.time.LocalDateTime; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class LocalDateTimeValue extends ObjectValueAdapter -{ - public LocalDateTimeValue( LocalDateTime localDateTime ) - { - super( localDateTime ); - } - - @Override - public LocalDateTime asLocalDateTime() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.LOCAL_DATE_TIME(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/LocalTimeValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/LocalTimeValue.java deleted file mode 100644 index 4dbf65e1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/LocalTimeValue.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.time.LocalTime; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class LocalTimeValue extends ObjectValueAdapter -{ - public LocalTimeValue( LocalTime time ) - { - super( time ); - } - - @Override - public LocalTime asLocalTime() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.LOCAL_TIME(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/MapValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/MapValue.java deleted file mode 100644 index a202bfe5..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/MapValue.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import java.util.Map; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.internal.util.Extract; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.types.Type; -import java.util.function.Function; - -import static org.neo4j.driver.internal.util.Format.formatPairs; -import static org.neo4j.driver.Values.ofObject; -import static org.neo4j.driver.Values.ofValue; - -public class MapValue extends ValueAdapter -{ - private final Map val; - - public MapValue( Map val ) - { - if ( val == null ) - { - throw new IllegalArgumentException( "Cannot construct MapValue from null" ); - } - this.val = val; - } - - @Override - public boolean isEmpty() - { - return val.isEmpty(); - } - - @Override - public Map asObject() - { - return asMap( ofObject() ); - } - - @Override - public Map asMap() - { - return Extract.map( val, ofObject() ); - } - - @Override - public Map asMap( Function mapFunction ) - { - return Extract.map( val, mapFunction ); - } - - @Override - public int size() - { - return val.size(); - } - - @Override - public boolean containsKey( String key ) - { - return val.containsKey( key ); - } - - @Override - public Iterable keys() - { - return val.keySet(); - } - - @Override - public Iterable values() - { - return val.values(); - } - - @Override - public Iterable values( Function mapFunction ) - { - return Extract.map( val, mapFunction ).values(); - } - - @Override - public Value get( String key ) - { - Value value = val.get( key ); - return value == null ? Values.NULL: value; - } - - @Override - public String toString() - { - return formatPairs( asMap( ofValue() ) ); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.MAP(); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - - MapValue values = (MapValue) o; - return val.equals( values.val ); - } - - @Override - public int hashCode() - { - return val.hashCode(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/NodeValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/NodeValue.java deleted file mode 100644 index dfb49f08..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/NodeValue.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Node; -import org.neo4j.driver.types.Type; - -public class NodeValue extends EntityValueAdapter -{ - public NodeValue( Node adapted ) - { - super( adapted ); - } - - @Override - public Node asNode() - { - return asEntity(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.NODE(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/NullValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/NullValue.java deleted file mode 100644 index a0ac92c9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/NullValue.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.Value; -import org.neo4j.driver.types.Type; - -public final class NullValue extends ValueAdapter -{ - public static final Value NULL = new NullValue(); - - private NullValue() - { - } - - @Override - public boolean isNull() - { - return true; - } - - @Override - public Object asObject() - { - return null; - } - - @Override - public String asString() - { - return "null"; - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.NULL(); - } - - @SuppressWarnings("EqualsWhichDoesntCheckParameterClass") - @Override - public boolean equals( Object obj ) - { - return obj == NULL; - } - - @Override - public int hashCode() - { - return 0; - } - - @Override - public String toString() - { - return "NULL"; - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/NumberValueAdapter.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/NumberValueAdapter.java deleted file mode 100644 index 6488b8fa..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/NumberValueAdapter.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -public abstract class NumberValueAdapter extends ValueAdapter -{ - @Override - public final V asObject() - { - return asNumber(); - } - - @Override - public abstract V asNumber(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/ObjectValueAdapter.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/ObjectValueAdapter.java deleted file mode 100644 index 2dcbf105..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/ObjectValueAdapter.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.util.Objects; - -import static java.lang.String.format; - -public abstract class ObjectValueAdapter extends ValueAdapter -{ - private final V adapted; - - protected ObjectValueAdapter( V adapted ) - { - if ( adapted == null ) - { - throw new IllegalArgumentException( format( "Cannot construct %s from null", getClass().getSimpleName() ) ); - } - this.adapted = adapted; - } - - @Override - public final V asObject() - { - return adapted; - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - ObjectValueAdapter that = (ObjectValueAdapter) o; - return Objects.equals( adapted, that.adapted ); - } - - @Override - public int hashCode() - { - return adapted.hashCode(); - } - - @Override - public String toString() - { - return adapted.toString(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/PathValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/PathValue.java deleted file mode 100644 index 4781f5ca..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/PathValue.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Path; -import org.neo4j.driver.types.Type; - -public class PathValue extends ObjectValueAdapter -{ - public PathValue( Path adapted ) - { - super( adapted ); - } - - @Override - public Path asPath() - { - return asObject(); - } - - @Override - public int size() - { - return asObject().length(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.PATH(); - } - -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/PointValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/PointValue.java deleted file mode 100644 index 62e81971..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/PointValue.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Point; -import org.neo4j.driver.types.Type; - -public class PointValue extends ObjectValueAdapter -{ - public PointValue( Point point ) - { - super( point ); - } - - @Override - public Point asPoint() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.POINT(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/RelationshipValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/RelationshipValue.java deleted file mode 100644 index 651222de..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/RelationshipValue.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.internal.value; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Relationship; -import org.neo4j.driver.types.Type; - -public class RelationshipValue extends EntityValueAdapter -{ - public RelationshipValue( Relationship adapted ) - { - super( adapted ); - } - - @Override - public Relationship asRelationship() - { - return asEntity(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.RELATIONSHIP(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/StringValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/StringValue.java deleted file mode 100644 index dc71f7eb..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/StringValue.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.util.Objects; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class StringValue extends ValueAdapter -{ - private final String val; - - public StringValue( String val ) - { - if ( val == null ) - { - throw new IllegalArgumentException( "Cannot construct StringValue from null" ); - } - this.val = val; - } - - @Override - public boolean isEmpty() - { - return val.isEmpty(); - } - - @Override - public int size() - { - return val.length(); - } - - @Override - public String asObject() - { - return asString(); - } - - @Override - public String asString() - { - return val; - } - - @Override - public String toString() - { - return String.format( "\"%s\"", val.replace( "\"", "\\\"" ) ); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.STRING(); - } - - @Override - public boolean equals( Object o ) - { - if ( this == o ) - { - return true; - } - if ( o == null || getClass() != o.getClass() ) - { - return false; - } - StringValue that = (StringValue) o; - return Objects.equals( val, that.val ); - } - - @Override - public int hashCode() - { - return val.hashCode(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/internal/value/TimeValue.java b/src/graiph-driver/java/org/neo4j/driver/internal/value/TimeValue.java deleted file mode 100644 index beb4980e..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/internal/value/TimeValue.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.internal.value; - -import java.time.OffsetTime; - -import org.neo4j.driver.internal.types.InternalTypeSystem; -import org.neo4j.driver.types.Type; - -public class TimeValue extends ObjectValueAdapter -{ - public TimeValue( OffsetTime time ) - { - super( time ); - } - - @Override - public OffsetTime asOffsetTime() - { - return asObject(); - } - - @Override - public Type type() - { - return InternalTypeSystem.TYPE_SYSTEM.TIME(); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/net/ServerAddress.java b/src/graiph-driver/java/org/neo4j/driver/net/ServerAddress.java deleted file mode 100644 index 96fff9fa..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/net/ServerAddress.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.net; - -import org.neo4j.driver.internal.BoltServerAddress; - -/** - * Represents a host and port. Host can either be an IP address or a DNS name. - * Both IPv4 and IPv6 hosts are supported. - */ -public interface ServerAddress -{ - /** - * Retrieve the host portion of this {@link ServerAddress}. - * - * @return the host, never {@code null}. - */ - String host(); - - /** - * Retrieve the port portion of this {@link ServerAddress}. - * - * @return the port, always in range [0, 65535]. - */ - int port(); - - /** - * Create a new address with the given host and port. - * - * @param host the host portion. Should not be {@code null}. - * @param port the port portion. Should be in range [0, 65535]. - * @return new server address with the specified host and port. - */ - static ServerAddress of( String host, int port ) - { - return new BoltServerAddress( host, host, port ); - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/net/ServerAddressResolver.java b/src/graiph-driver/java/org/neo4j/driver/net/ServerAddressResolver.java deleted file mode 100644 index b82d16bd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/net/ServerAddressResolver.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.net; - -import java.util.Set; - -/** - * A resolver function used by the routing driver to resolve the initial address used to create the driver. - */ -@FunctionalInterface -public interface ServerAddressResolver -{ - /** - * Resolve the given address to a set of other addresses. - * Exceptions thrown by this method will be logged and driver will continue using the original address. - * - * @param address the address to resolve. - * @return new set of addresses. - */ - Set resolve( ServerAddress address ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/reactive/RxResult.java b/src/graiph-driver/java/org/neo4j/driver/reactive/RxResult.java deleted file mode 100644 index 2ccf52a9..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/reactive/RxResult.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.reactive; - -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.summary.ResultSummary; - -/** - * A reactive result provides a reactive way to execute query on the server and receives records back. - * This reactive result consists of a result key publisher, a record publisher and a result summary publisher. - * The reactive result is created via {@link RxSession#run(Statement)} and {@link RxTransaction#run(Statement)} for example. - * On the creation of the result, the query submitted to create this result will not be executed until one of the publishers in this class is subscribed. - * The records or the summary stream has to be consumed and finished (completed or errored) to ensure the resources used by this result to be freed correctly. - * - * @see Publisher - * @see Subscriber - * @see Subscription - * @since 2.0 - */ -public interface RxResult -{ - /** - * Returns a cold publisher of keys. - *

- * When this publisher is {@linkplain Publisher#subscribe(Subscriber) subscribed}, the query statement is sent to the server and gets executed.
- * This method does not start the record streaming nor publish query execution errors.
- * To retrieve the execution result, either {@link #records()} or {@link #summary()} can be used.
- * {@link #records()} starts record streaming and reports query execution errors.
- * {@link #summary()} skips record streaming and directly reports query execution errors.
- * <p>
- * Consuming the execution result ensures that the resources (such as network connections) used by this result are freed correctly.
- * Consuming the keys without consuming the execution result will result in a resource leak.
- * To avoid the resource leak, {@link RxSession#close()} (and/or {@link RxTransaction#commit()} and {@link RxTransaction#rollback()}) shall be invoked
- * and subscribed to enforce the result resources created in the {@link RxSession} (and/or {@link RxTransaction}) to be freed correctly.
- * <p>
- * This publisher can be subscribed many times. The keys published stay the same, as the keys are buffered.
- * If this publisher is subscribed after the publisher of {@link #records()} or {@link #summary()},
- * then the buffered keys will be returned.
- * @return a cold publisher of keys.
- */
- Publisher<String> keys();
-
- /**
- * Returns a cold unicast publisher of records.
- * <p>
- * When the record publisher is {@linkplain Publisher#subscribe(Subscriber) subscribed},
- * the query statement is executed and the query result is streamed back as a record stream followed by a result summary.
- * This record publisher publishes all records in the result and signals the completion.
- * However, before completion or error reporting (if any), a cleanup of result resources such as the network connection will be carried out automatically.
- * <p>
- * Therefore, the {@link Subscriber} of this record publisher shall wait for the termination signal (complete or error)
- * to ensure that the resources used by this result are released correctly.
- * Then the session is ready to be used to run more queries.
- * <p>
- * Cancelling the record streaming will immediately terminate the propagation of new records,
- * but it will not cancel the query execution.
- * As a result, a termination signal (complete or error) will still be sent to the {@link Subscriber} after the query execution is finished.
- * <p>
- * Record publishing by default runs in a network IO thread, so no blocking operation is allowed in this thread.
- * Otherwise network IO might be blocked by application logic.
- * <p>
- * This publisher can only be subscribed once, by a single {@link Subscriber}.
- * <p>
- * If this publisher is subscribed after {@link #keys()}, then the publishing of records is carried out after the arrival of keys.
- * If this publisher is subscribed after {@link #summary()}, then the publishing of records is already cancelled
- * and an empty publisher of zero records will be returned.
- * @return a cold unicast publisher of records.
- */
- Publisher<Record> records();
-
- /**
- * Returns a cold publisher of the result summary, which arrives after all records.
- * <p>
- * {@linkplain Publisher#subscribe(Subscriber) Subscribing} the summary publisher results in the execution of the query followed by the result summary being returned.
- * The summary publisher cancels record publishing if not yet subscribed and directly streams back the summary on query execution completion.
- * As a result, an invocation of {@link #records()} after this method would receive an empty publisher.
- * <p>
- * If subscribed after {@link #keys()}, then the result summary will be published after the query execution without streaming any records to the client.
- * If subscribed after {@link #records()}, then the result summary will be published after the query execution and the streaming of records.
- * <p>
- * Usually, this method shall be chained after {@link #records()} to ensure that all records are processed before the summary.
- * <p>
- * This method can be subscribed multiple times. When the {@linkplain ResultSummary summary} arrives, it will be buffered locally for all subsequent calls. - * @return a cold publisher of result summary which only arrives after all records. - */ - Publisher summary(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/reactive/RxSession.java b/src/graiph-driver/java/org/neo4j/driver/reactive/RxSession.java deleted file mode 100644 index 9c04f060..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/reactive/RxSession.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.reactive; - -import org.reactivestreams.Publisher; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -import org.neo4j.driver.AccessMode; -import org.neo4j.driver.Session; -import org.neo4j.driver.Statement; -import org.neo4j.driver.TransactionConfig; -import org.neo4j.driver.Values; - -/** - * A reactive session is the same as {@link Session} except it provides a reactive API. - * @see Session - * @see RxResult - * @see RxTransaction - * @see Publisher - * @since 2.0 - */ -public interface RxSession extends RxStatementRunner -{ - /** - * Begin a new explicit {@linkplain RxTransaction transaction}. At - * most one transaction may exist in a session at any point in time. To - * maintain multiple concurrent transactions, use multiple concurrent - * sessions. - *

- * By default it is executed in a network IO thread, so no blocking operation is allowed in this thread.
- *
- * @return a new {@link RxTransaction}
- */
- Publisher<RxTransaction> beginTransaction();
-
- /**
- * Begin a new explicit {@linkplain RxTransaction transaction} with the specified {@link TransactionConfig configuration}.
- * At most one transaction may exist in a session at any point in time. To
- * maintain multiple concurrent transactions, use multiple concurrent sessions.
- * <p>
- * By default it is executed in a network IO thread, so no blocking operation is allowed in this thread.
- *
- * @param config configuration for the new transaction.
- * @return a new {@link RxTransaction}
- */
- Publisher<RxTransaction> beginTransaction( TransactionConfig config );
-
- /**
- * Execute a given unit of reactive work in a {@link AccessMode#READ read} reactive transaction.
- * <p>
- * The transaction will automatically be committed unless the given unit of work fails or
- * the {@link RxTransaction#commit() transaction commit} fails.
- * It will also not be committed if explicitly rolled back via {@link RxTransaction#rollback()}.
- * <p>
- * The returned publisher and the given {@link RxTransactionWork} are completed/executed by an IO thread which should never block.
- * Otherwise IO operations on this and potentially other network connections might deadlock.
- * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned publisher and do not use them inside the
- * {@link RxTransactionWork}.
- *
- * @param work the {@link RxTransactionWork} to be applied to a new read transaction.
- * Operation executed by the given work must NOT include any blocking operation.
- * @param <T> the return type of the given unit of work.
- * @return a {@link Publisher publisher} completed with the same result as returned by the given unit of work.
- * The publisher can be completed exceptionally if the given work or commit fails.
- *
- */
- <T> Publisher<T> readTransaction( RxTransactionWork<? extends Publisher<T>> work );
-
- /**
- * Execute a given unit of reactive work in a {@link AccessMode#READ read} reactive transaction with
- * the specified {@link TransactionConfig configuration}.
- * <p>
- * The transaction will automatically be committed unless the given unit of work fails or
- * the {@link RxTransaction#commit() transaction commit} fails.
- * It will also not be committed if explicitly rolled back via {@link RxTransaction#rollback()}.
- * <p>
- * The returned publisher and the given {@link RxTransactionWork} are completed/executed by an IO thread which should never block.
- * Otherwise IO operations on this and potentially other network connections might deadlock.
- * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned publisher and do not use them inside the
- * {@link RxTransactionWork}.
- *
- * @param work the {@link RxTransactionWork} to be applied to a new read transaction.
- * Operation executed by the given work must NOT include any blocking operation.
- * @param <T> the return type of the given unit of work.
- * @return a {@link Publisher publisher} completed with the same result as returned by the given unit of work.
- * The publisher can be completed exceptionally if the given work or commit fails.
- *
- */
- <T> Publisher<T> readTransaction( RxTransactionWork<? extends Publisher<T>> work, TransactionConfig config );
-
- /**
- * Execute a given unit of reactive work in a {@link AccessMode#WRITE write} reactive transaction.
- * <p>
- * The transaction will automatically be committed unless the given unit of work fails or
- * the {@link RxTransaction#commit() transaction commit} fails.
- * It will also not be committed if explicitly rolled back via {@link RxTransaction#rollback()}.
- * <p>
- * The returned publisher and the given {@link RxTransactionWork} are completed/executed by an IO thread which should never block.
- * Otherwise IO operations on this and potentially other network connections might deadlock.
- * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned publisher and do not use them inside the
- * {@link RxTransactionWork}.
- *
- * @param work the {@link RxTransactionWork} to be applied to a new write transaction.
- * Operation executed by the given work must NOT include any blocking operation.
- * @param <T> the return type of the given unit of work.
- * @return a {@link Publisher publisher} completed with the same result as returned by the given unit of work.
- * The publisher can be completed exceptionally if the given work or commit fails.
- *
- */
- <T> Publisher<T> writeTransaction( RxTransactionWork<? extends Publisher<T>> work );
-
- /**
- * Execute a given unit of reactive work in a {@link AccessMode#WRITE write} reactive transaction with
- * the specified {@link TransactionConfig configuration}.
- * <p>
- * The transaction will automatically be committed unless the given unit of work fails or
- * the {@link RxTransaction#commit() transaction commit} fails.
- * It will also not be committed if explicitly rolled back via {@link RxTransaction#rollback()}.
- * <p>
- * The returned publisher and the given {@link RxTransactionWork} are completed/executed by an IO thread which should never block.
- * Otherwise IO operations on this and potentially other network connections might deadlock.
- * Please do not chain blocking operations like {@link CompletableFuture#get()} on the returned publisher and do not use them inside the
- * {@link RxTransactionWork}.
- *
- * @param work the {@link RxTransactionWork} to be applied to a new write transaction.
- * Operation executed by the given work must NOT include any blocking operation.
- * @param <T> the return type of the given unit of work.
- * @return a {@link Publisher publisher} completed with the same result as returned by the given unit of work.
- * The publisher can be completed exceptionally if the given work or commit fails.
- *
- */
- <T> Publisher<T> writeTransaction( RxTransactionWork<? extends Publisher<T>> work, TransactionConfig config );
-
- /**
- * Run a statement with parameters in an auto-commit transaction with the specified {@link TransactionConfig} and return a reactive result stream.
- * The statement is not executed when the reactive result is returned.
- * Instead, the publishers in the result will actually start the execution of the statement.
- *
- * @param statement text of a Neo4j statement.
- * @param config configuration for the new transaction.
- * @return a reactive result.
- */
- RxResult run( String statement, TransactionConfig config );
-
- /**
- * Run a statement with parameters in an auto-commit transaction with the specified {@link TransactionConfig} and return a reactive result stream.
- * The statement is not executed when the reactive result is returned.
- * Instead, the publishers in the result will actually start the execution of the statement.
- * <p>
- * This method takes a set of parameters that will be injected into the statement by Neo4j.
- * Using parameters is highly encouraged: it helps avoid dangerous Cypher injection attacks
- * and improves database performance, as Neo4j can re-use query plans more often.
- * <p>
- * This version of run takes a {@link Map} of parameters.
- * The values in the map must be values that can be converted to Neo4j types.
- * See {@link Values#parameters(Object...)} for a list of allowed types.
- *
- * <h2>Example</h2>
- * <pre>
-     * {@code
-     * Map<String, Object> metadata = new HashMap<>();
-     * metadata.put("type", "update name");
-     *
-     * TransactionConfig config = TransactionConfig.builder()
-     *                 .withTimeout(Duration.ofSeconds(3))
-     *                 .withMetadata(metadata)
-     *                 .build();
-     *
-     * Map<String, Object> parameters = new HashMap<>();
-     * parameters.put("myNameParam", "Bob");
-     *
-     * RxResult result = rxSession.run("MATCH (n) WHERE n.name = {myNameParam} RETURN (n)", parameters, config);
-     * }
-     * </pre>
- *
- * @param statement text of a Neo4j statement.
- * @param parameters input data for the statement.
- * @param config configuration for the new transaction.
- * @return a reactive result.
- */
- RxResult run( String statement, Map<String, Object> parameters, TransactionConfig config );
-
- /**
- * Run a statement in an auto-commit transaction with the specified {@link TransactionConfig configuration} and return a reactive result stream.
- * The statement is not executed when the reactive result is returned.
- * Instead, the publishers in the result will actually start the execution of the statement.
- * <h2>Example</h2>
- * <pre>
-     * {@code
-     * Map<String, Object> metadata = new HashMap<>();
-     * metadata.put("type", "update name");
-     *
-     * TransactionConfig config = TransactionConfig.builder()
-     *                 .withTimeout(Duration.ofSeconds(3))
-     *                 .withMetadata(metadata)
-     *                 .build();
-     *
-     * Statement statement = new Statement("MATCH (n) WHERE n.name=$myNameParam RETURN n.age");
-     * RxResult result = rxSession.run(statement.withParameters(Values.parameters("myNameParam", "Bob")));
-     * }
-     * </pre>
- *
- * @param statement a Neo4j statement.
- * @param config configuration for the new transaction.
- * @return a reactive result.
- */
- RxResult run( Statement statement, TransactionConfig config );
-
- /**
- * Return the bookmark received following the last completed statement within this session.
- * The last completed statement can be run in a {@linkplain RxTransaction transaction}
- * started using {@linkplain #beginTransaction() beginTransaction} or directly via {@link #run(Statement) run}.
- *
- * @return a reference to a previous transaction.
- */
- String lastBookmark();
-
- /**
- * Signal that you are done using this session.
- * In the default driver usage, closing and accessing sessions is very low cost.
- * <p>
- * This operation is not needed if 1) all results created in the session have been fully consumed and
- * 2) all transactions opened by this session have been either committed or rolled back.
- * <p>
- * This method is a fallback if you failed to fulfill the two requirements above. - * This publisher is completed when all outstanding statements in the session have completed, - * meaning any writes you performed are guaranteed to be durably stored. - * It might be completed exceptionally when there are unconsumed errors from previous statements or transactions. - * - * @param makes it easier to be chained. - * @return an empty publisher that represents the reactive close. - */ - Publisher close(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/reactive/RxStatementRunner.java b/src/graiph-driver/java/org/neo4j/driver/reactive/RxStatementRunner.java deleted file mode 100644 index f7c97f96..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/reactive/RxStatementRunner.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.reactive; - -import java.util.Map; - -import org.neo4j.driver.Record; -import org.neo4j.driver.Statement; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; - -/** - * Common interface for components that can execute Neo4j statements using Reactive API. - * @see RxSession - * @see RxTransaction - * @since 2.0 - */ -public interface RxStatementRunner -{ - /** - * Register running of a statement and return a reactive result stream. - * The statement is not executed when the reactive result is returned. - * Instead, the publishers in the result will actually start the execution of the statement. - * - * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - * - * This particular method takes a {@link Value} as its input. This is useful - * if you want to take a map-like value that you've gotten from a prior result - * and send it back as parameters. - * - * If you are creating parameters programmatically, {@link #run(String, Map)} - * might be more helpful, it converts your map to a {@link Value} for you. - * - * @param statementTemplate text of a Neo4j statement - * @param parameters input parameters, should be a map Value, see {@link Values#parameters(Object...)}. - * @return a reactive result. - */ - RxResult run( String statementTemplate, Value parameters ); - - /** - * Register running of a statement and return a reactive result stream. - * The statement is not executed when the reactive result is returned. - * Instead, the publishers in the result will actually start the execution of the statement. - * - * This method takes a set of parameters that will be injected into the - * statement by Neo4j. 
Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - * - * This version of run takes a {@link Map} of parameters. The values in the map - * must be values that can be converted to Neo4j types. See {@link Values#parameters(Object...)} for - * a list of allowed types. - * - * @param statementTemplate text of a Neo4j statement - * @param statementParameters input data for the statement - * @return a reactive result. - */ - RxResult run( String statementTemplate, Map statementParameters ); - - /** - * Register running of a statement and return a reactive result stream. - * The statement is not executed when the reactive result is returned. - * Instead, the publishers in the result will actually start the execution of the statement. - * - * This method takes a set of parameters that will be injected into the - * statement by Neo4j. Using parameters is highly encouraged, it helps avoid - * dangerous cypher injection attacks and improves database performance as - * Neo4j can re-use query plans more often. - * - * This version of run takes a {@link Record} of parameters, which can be useful - * if you want to use the output of one statement as input for another. - * - * @param statementTemplate text of a Neo4j statement - * @param statementParameters input data for the statement - * @return a reactive result. - */ - RxResult run( String statementTemplate, Record statementParameters ); - - /** - * Register running of a statement and return a reactive result stream. - * The statement is not executed when the reactive result is returned. - * Instead, the publishers in the result will actually start the execution of the statement. - * - * @param statementTemplate text of a Neo4j statement - * @return a reactive result. - */ - RxResult run( String statementTemplate ); - - /** - * Register running of a statement and return a reactive result stream. - * The statement is not executed when the reactive result is returned. - * Instead, the publishers in the result will actually start the execution of the statement. - * - * @param statement a Neo4j statement - * @return a reactive result. - */ - RxResult run( Statement statement ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/reactive/RxTransaction.java b/src/graiph-driver/java/org/neo4j/driver/reactive/RxTransaction.java deleted file mode 100644 index b5b239df..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/reactive/RxTransaction.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.reactive; - -import org.reactivestreams.Publisher; - -import org.neo4j.driver.Transaction; - -/** - * Same as {@link Transaction} except this reactive transaction exposes a reactive API. 
- * @see Transaction - * @see RxSession - * @see Publisher - * @since 2.0 - */ -public interface RxTransaction extends RxStatementRunner -{ - /** - * Commits the transaction. - * It completes without publishing anything if transaction is committed successfully. - * Otherwise, errors when there is any error to commit. - * @param <T> makes it easier to be chained after other publishers. - * @return an empty publisher. - */ - <T> Publisher<T> commit(); - - /** - * Rolls back the transaction. - * It completes without publishing anything if transaction is rolled back successfully. - * Otherwise, errors when there is any error to roll back. - * @param <T> makes it easier to be chained after other publishers. - * @return an empty publisher. - */ - <T> Publisher<T> rollback(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/reactive/RxTransactionWork.java b/src/graiph-driver/java/org/neo4j/driver/reactive/RxTransactionWork.java deleted file mode 100644 index 449607c7..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/reactive/RxTransactionWork.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.reactive; - -/** - * Callback that executes operations against a given {@link RxTransaction}. - * To be used with {@link RxSession#readTransaction(RxTransactionWork)} and - * {@link RxSession#writeTransaction(RxTransactionWork)} methods. - * - * @param <T> the return type of this work. - * @since 2.0 - */ -public interface RxTransactionWork<T> -{ - /** - * Executes all given operations against the same transaction. - * - * @param tx the transaction to use. - * @return some result object or {@code null} if none. - */ - T execute( RxTransaction tx ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/InputPosition.java b/src/graiph-driver/java/org/neo4j/driver/summary/InputPosition.java deleted file mode 100644 index 3314f205..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/InputPosition.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -import org.neo4j.driver.util.Immutable; - -/** - * An input position refers to a specific character in a statement.
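For reference, a minimal sketch of how the reactive runner and transaction interfaces removed above are typically driven. It assumes Project Reactor (reactor-core) on the classpath as the Reactive Streams implementation, plus an already opened RxSession named session; the query text, parameter and class names are illustrative assumptions, not part of this change.

    import org.neo4j.driver.Values;
    import org.neo4j.driver.reactive.RxSession;
    import org.neo4j.driver.reactive.RxTransaction;
    import reactor.core.publisher.Flux;

    class RxRunSketch {
        // Runs a parameterised statement inside an explicit reactive transaction,
        // committing on success and rolling back on error or cancellation.
        static Flux<String> adultNames(RxSession session) {
            return Flux.usingWhen(
                    session.beginTransaction(),
                    tx -> Flux.from(tx.run("MATCH (p:Person) WHERE p.age > $age RETURN p.name",
                                    Values.parameters("age", 18)).records())
                            .map(rec -> rec.get("p.name").asString()),
                    RxTransaction::commit,
                    (tx, error) -> tx.rollback(),
                    RxTransaction::rollback);
        }
    }

Nothing is sent to the server until the records() publisher is subscribed to, which is the lazy-execution behaviour the run(...) javadoc above describes.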
- * @since 1.0 - */ -@Immutable -public interface InputPosition -{ - /** - * The character offset referred to by this position; offset numbers start at 0. - * - * @return the offset of this position. - */ - int offset(); - - /** - * The line number referred to by the position; line numbers start at 1. - * - * @return the line number of this position. - */ - int line(); - - /** - * The column number referred to by the position; column numbers start at 1. - * - * @return the column number of this position. - */ - int column(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/Notification.java b/src/graiph-driver/java/org/neo4j/driver/summary/Notification.java deleted file mode 100644 index 8efc27e1..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/Notification.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -import org.neo4j.driver.util.Immutable; - -/** - * Representation for notifications found when executing a statement. - * - * A notification can be visualized in a client pinpointing problems or other information about the statement. - * @since 1.0 - */ -@Immutable -public interface Notification -{ - /** - * Returns a notification code for the discovered issue. - * @return the notification code - */ - String code(); - - /** - * Returns a short summary of the notification. - * @return the title of the notification. - */ - String title(); - - /** - * Returns a longer description of the notification. - * @return the description of the notification. - */ - String description(); - - /** - * The position in the statement where this notification points to. - * Not all notifications have a unique position to point to and in that case the position would be set to null. - * - * @return the position in the statement where the issue was found, or null if no position is associated with this - * notification. - */ - InputPosition position(); - - /** - * The severity level of the notification. - * - * @return the severity level of the notification - */ - String severity(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/Plan.java b/src/graiph-driver/java/org/neo4j/driver/summary/Plan.java deleted file mode 100644 index f3725726..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/Plan.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.Value; -import org.neo4j.driver.util.Immutable; - -/** - * This describes the plan that the database planner produced and used (or will use) to execute your statement. - * This can be extremely helpful in understanding what a statement is doing, and how to optimize it. For more - * details, see the Neo4j Manual. - * - * The plan for the statement is a tree of plans - each sub-tree containing zero or more child plans. The statement - * starts with the root plan. Each sub-plan is of a specific {@link #operatorType() operator type}, which describes - * what that part of the plan does - for instance, perform an index lookup or filter results. The Neo4j Manual contains - * a reference of the available operator types, and these may differ across Neo4j versions. - * - * For a simple view of a plan, the {@code toString} method will give a human-readable rendering of the tree. - * @since 1.0 - */ -@Immutable -public interface Plan -{ - /** - * @return the operation this plan is performing. - */ - String operatorType(); - - /** - * Many {@link #operatorType() operators} have arguments defining their specific behavior. This map contains - * those arguments. - * - * @return the arguments for the {@link #operatorType() operator} used. - */ - Map arguments(); - - /** - * Identifiers used by this part of the plan. These can be both identifiers introduce by you, or automatically - * generated identifiers. - * @return a list of identifiers used by this plan. - */ - List identifiers(); - - /** - * As noted in the class-level javadoc, a plan is a tree, where each child is another plan. The children are where - * this part of the plan gets its input records - unless this is an {@link #operatorType() operator} that introduces - * new records on its own. - * @return zero or more child plans. - */ - List children(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/ProfiledPlan.java b/src/graiph-driver/java/org/neo4j/driver/summary/ProfiledPlan.java deleted file mode 100644 index d354c715..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/ProfiledPlan.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.summary; - -import java.util.List; - -/** - * This is the same as a regular {@link Plan} - except this plan has been executed, meaning it also contains detailed information about how much work each - * step of the plan incurred on the database. - * @since 1.0 - */ -public interface ProfiledPlan extends Plan -{ - /** - * @return the number of times this part of the plan touched the underlying data stores - */ - long dbHits(); - - /** - * @return the number of records this part of the plan produced - */ - long records(); - - @Override - List children(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/ResultSummary.java b/src/graiph-driver/java/org/neo4j/driver/summary/ResultSummary.java deleted file mode 100644 index 9915616f..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/ResultSummary.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -import java.util.List; -import java.util.concurrent.TimeUnit; - -import org.neo4j.driver.Statement; -import org.neo4j.driver.util.Immutable; - -/** - * The result summary of running a statement. The result summary interface can be used to investigate - * details about the result, like the type of query run, how many and which kinds of updates have been executed, - * and query plan and profiling information if available. - * - * The result summary is only available after all result records have been consumed. - * - * Keeping the result summary around does not influence the lifecycle of any associated session and/or transaction. - * @since 1.0 - */ -@Immutable -public interface ResultSummary -{ - /** - * @return statement that has been executed - */ - Statement statement(); - - /** - * @return counters for operations the statement triggered - */ - SummaryCounters counters(); - - /** - * @return type of statement that has been executed - */ - StatementType statementType(); - - /** - * @return true if the result contained a statement plan, i.e. is the summary of a Cypher "PROFILE" or "EXPLAIN" statement - */ - boolean hasPlan(); - - /** - * @return true if the result contained profiling information, i.e. is the summary of a Cypher "PROFILE" statement - */ - boolean hasProfile(); - - /** - * This describes how the database will execute your statement. - * - * @return statement plan for the executed statement if available, otherwise null - */ - Plan plan(); - - /** - * This describes how the database did execute your statement. - * - * If the statement you executed {@link #hasProfile() was profiled}, the statement plan will contain detailed - * information about what each step of the plan did. That more in-depth version of the statement plan becomes - * available here. 
- * - * @return profiled statement plan for the executed statement if available, otherwise null - */ - ProfiledPlan profile(); - - /** - * A list of notifications that might arise when executing the statement. - * Notifications can be warnings about problematic statements or other valuable information that can be presented - * in a client. - * - * Unlike failures or errors, notifications do not affect the execution of a statement. - * - * @return a list of notifications produced while executing the statement. The list will be empty if no - * notifications were produced while executing the statement. - */ - List<Notification> notifications(); - - /** - * The time it took the server to make the result available for consumption. - * - * @param unit The unit of the duration. - * @return The time it took for the server to have the result available in the provided time unit. - */ - long resultAvailableAfter( TimeUnit unit ); - - /** - * The time it took the server to consume the result. - * - * @param unit The unit of the duration. - * @return The time it took for the server to consume the result in the provided time unit. - */ - long resultConsumedAfter( TimeUnit unit ); - - /** - * The basic information of the server where the result is obtained from - * @return basic information of the server where the result is obtained from - */ - ServerInfo server(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/ServerInfo.java b/src/graiph-driver/java/org/neo4j/driver/summary/ServerInfo.java deleted file mode 100644 index fdd3832a..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/ServerInfo.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -/** - * Provides some basic information of the server where the result is obtained from. - */ -public interface ServerInfo -{ - - /** - * Returns the address of the server where the query was executed. - * @return the address of the server where the query was executed. - */ - String address(); - - /** - * Returns the version of the server that executed the query. - * Supported since neo4j 3.1. - * @return the server version, or null if not available. - */ - String version(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/StatementType.java b/src/graiph-driver/java/org/neo4j/driver/summary/StatementType.java deleted file mode 100644 index cd29c7c6..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/StatementType.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -import org.neo4j.driver.exceptions.ClientException; - -/** - * The type of statement executed. - * @since 1.0 - */ -public enum StatementType -{ - READ_ONLY, - READ_WRITE, - WRITE_ONLY, - SCHEMA_WRITE; - - public static StatementType fromCode( String type ) - { - switch ( type ) - { - case "r": - return StatementType.READ_ONLY; - case "rw": - return StatementType.READ_WRITE; - case "w": - return StatementType.WRITE_ONLY; - case "s": - return StatementType.SCHEMA_WRITE; - default: - throw new ClientException( "Unknown statement type: `" + type + "`." ); - } - } -} diff --git a/src/graiph-driver/java/org/neo4j/driver/summary/SummaryCounters.java b/src/graiph-driver/java/org/neo4j/driver/summary/SummaryCounters.java deleted file mode 100644 index 291755ea..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/summary/SummaryCounters.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.summary; - -import org.neo4j.driver.util.Immutable; - -/** - * Contains counters for various operations that a statement triggered. - * @since 1.0 - */ -@Immutable -public interface SummaryCounters -{ - /** - * Whether there were any updates at all, eg. any of the counters are greater than 0. - * @return true if the statement made any updates - */ - boolean containsUpdates(); - - /** - * @return number of nodes created. - */ - int nodesCreated(); - - /** - * @return number of nodes deleted. - */ - int nodesDeleted(); - - /** - * @return number of relationships created. - */ - int relationshipsCreated(); - - /** - * @return number of relationships deleted. - */ - int relationshipsDeleted(); - - /** - * @return number of properties (on both nodes and relationships) set. - */ - int propertiesSet(); - - /** - * @return number of labels added to nodes. - */ - int labelsAdded(); - - /** - * @return number of labels removed from nodes. - */ - int labelsRemoved(); - - /** - * @return number of indexes added to the schema. - */ - int indexesAdded(); - - /** - * @return number of indexes removed from the schema. - */ - int indexesRemoved(); - - /** - * @return number of constraints added to the schema. - */ - int constraintsAdded(); - - /** - * @return number of constraints removed from the schema. 
- */ - int constraintsRemoved(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/Entity.java b/src/graiph-driver/java/org/neo4j/driver/types/Entity.java deleted file mode 100644 index 57b5076d..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/Entity.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import org.neo4j.driver.util.Immutable; - -/** - * A uniquely identifiable property container that can form part of a Neo4j graph. - * @since 1.0 - */ -@Immutable -public interface Entity extends MapAccessor -{ - /** - * A unique id for this Entity. Ids are guaranteed to remain stable for the duration of the session they - * were found in, but may be re-used for other entities after that. As such, if you want a public identity to use - * for your entities, attaching an explicit 'id' property or similar persistent and unique identifier is a better - * choice. - * - * @return the id of this entity - */ - long id(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/IsoDuration.java b/src/graiph-driver/java/org/neo4j/driver/types/IsoDuration.java deleted file mode 100644 index df1b3372..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/IsoDuration.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import java.time.temporal.TemporalAmount; - -import org.neo4j.driver.Values; -import org.neo4j.driver.util.Immutable; - -/** - * Represents temporal amount containing months, days, seconds and nanoseconds of the second. A duration can be negative. - *

- * Value that represents a duration can be created using {@link Values#isoDuration(long, long, long, int)} method. - */ -@Immutable -public interface IsoDuration extends TemporalAmount -{ - /** - * Retrieve amount of months in this duration. - * - * @return number of months. - */ - long months(); - - /** - * Retrieve amount of days in this duration. - * - * @return number of days. - */ - long days(); - - /** - * Retrieve amount of seconds in this duration. - * - * @return number of seconds. - */ - long seconds(); - - /** - * Retrieve amount of nanoseconds of the second in this duration. - * - * @return number of nanoseconds. - */ - int nanoseconds(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/MapAccessor.java b/src/graiph-driver/java/org/neo4j/driver/types/MapAccessor.java deleted file mode 100644 index 014ee178..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/MapAccessor.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import java.util.Map; - -import org.neo4j.driver.internal.value.NullValue; -import org.neo4j.driver.Value; -import org.neo4j.driver.Values; -import org.neo4j.driver.exceptions.ClientException; -import java.util.function.Function; - -/** - * Access the keys, properties and values of an underlying unordered map by key - * - * This provides only read methods. Subclasses may chose to provide additional methods - * for changing the underlying map. - * @since 1.0 - */ -public interface MapAccessor -{ - /** - * Retrieve the keys of the underlying map - * - * @return all map keys in unspecified order - */ - Iterable keys(); - - /** - * Check if the list of keys contains the given key - * - * @param key the key - * @return {@code true} if this map keys contains the given key otherwise {@code false} - */ - boolean containsKey( String key ); - - /** - * Retrieve the value of the property with the given key - * - * @param key the key of the property - * @return the property's value or a {@link NullValue} if no such key exists - * @throws ClientException if record has not been initialized - */ - Value get( String key ); - - /** - * Retrieve the number of entries in this map - * - * @return the number of entries in this map - */ - int size(); - - /** - * Retrieve all values of the underlying collection - * - * @return all values in unspecified order - */ - Iterable values(); - - /** - * Map and retrieve all values of the underlying collection - * - * @param mapFunction a function to map from Value to T. See {@link Values} for some predefined functions, such - * as {@link Values#ofBoolean()}, {@link Values#ofList(Function)}. 
- * @param <T> the target type of mapping - * @return the result of mapping all values in unspecified order - */ - <T> Iterable<T> values( Function<Value,T> mapFunction ); - - /** - * Return the underlying map as a map of string keys and values converted using - * {@link Value#asObject()}. - * - * This is equivalent to calling {@link #asMap(Function)} with {@link Values#ofObject()}. - * - * @return the value as a Java map - */ - Map<String,Object> asMap(); - - /** - * @param mapFunction a function to map from Value to T. See {@link Values} for some predefined functions, such - * as {@link Values#ofBoolean()}, {@link Values#ofList(Function)}. - * @param <T> the type of map values - * @see Values for a long list of built-in conversion functions - * @return the value as a map from string keys to values of type T obtained from mapping the original map values, if possible - */ - <T> Map<String,T> asMap( Function<Value,T> mapFunction ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/MapAccessorWithDefaultValue.java b/src/graiph-driver/java/org/neo4j/driver/types/MapAccessorWithDefaultValue.java deleted file mode 100644 index f3bdb486..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/MapAccessorWithDefaultValue.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import java.util.List; -import java.util.Map; - -import org.neo4j.driver.Value; -import java.util.function.Function; - -/** - * Provides methods to access the value of an underlying unordered map by key. - * When calling the methods, a user needs to provide a default value, which will be given back if no match is found by - * the key provided. - * The default value also serves the purpose of specifying the return type of the value found in map by key. - * If the type of the value found A differs from the type of the default value B, a cast from A to B would happen - * automatically. Note: an error might arise if the cast from A to B is not possible. - */ -public interface MapAccessorWithDefaultValue -{ - /** - * Retrieve the value with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the value - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the value found by the key or the default value if no such key exists - */ - Value get( String key, Value defaultValue ); - - /** - * Retrieve the object with the given key. - * If no object found by the key, then the default object provided would be returned. - * @param key the key of the object - * @param defaultValue the default object that would be returned if no object found by the key in the map - * @return the object found by the key or the default object if no such key exists - */ - Object get( String key, Object defaultValue ); - - /** - * Retrieve the number with the given key.
- * If no number found by the key, then the default number provided would be returned. - * @param key the key of the number - * @param defaultValue the default number that would be returned if no number found by the key in the map - * @return the number found by the key or the default number if no such key exists - */ - Number get( String key, Number defaultValue ); - - /** - * Retrieve the entity with the given key. - * If no entity found by the key, then the default entity provided would be returned. - * @param key the key of the entity - * @param defaultValue the default entity that would be returned if no entity found by the key in the map - * @return the entity found by the key or the default entity if no such key exists - */ - Entity get( String key, Entity defaultValue ); - - /** - * Retrieve the node with the given key. - * If no node found by the key, then the default node provided would be returned. - * @param key the key of the node - * @param defaultValue the default node that would be returned if no node found by the key in the map - * @return the node found by the key or the default node if no such key exists - */ - Node get( String key, Node defaultValue ); - - /** - * Retrieve the path with the given key. - * If no path found by the key, then the default path provided would be returned. - * @param key the key of the property - * @param defaultValue the default path that would be returned if no path found by the key in the map - * @return the path found by the key or the default path if no such key exists - */ - Path get( String key, Path defaultValue ); - - /** - * Retrieve the value with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the property - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the value found by the key or the default value if no such key exists - */ - Relationship get( String key, Relationship defaultValue ); - - /** - * Retrieve the list of objects with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the value - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the list of objects found by the key or the default value if no such key exists - */ - List get( String key, List defaultValue ); - - /** - * Retrieve the list with the given key. - * If no value found by the key, then the default list provided would be returned. - * @param key the key of the value - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @param mapFunc the map function that defines how to map each element of the list from {@link Value} to T - * @param the type of the elements in the returned list - * @return the converted list found by the key or the default list if no such key exists - */ - List get( String key, List defaultValue, Function mapFunc ); - - /** - * Retrieve the map with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the property - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the map found by the key or the default value if no such key exists - */ - Map get( String key, Map defaultValue ); - - /** - * Retrieve the map with the given key. 
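To make the default-value accessors above concrete, a small hedged sketch; the record variable and the keys used here are illustrative assumptions (Record in the driver implements this default-value accessor interface), not part of the original sources.

    import org.neo4j.driver.Record;

    class DefaultValueSketch {
        // Each accessor falls back to the supplied default when the key is absent,
        // and the default also fixes the expected return type of the lookup.
        static String describe(Record record) {
            String name = record.get("name", "unknown");
            int age = record.get("age", -1);
            boolean active = record.get("active", false);
            return name + " (age=" + age + ", active=" + active + ")";
        }
    }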
- * If no value found by the key, then the default map provided would be returned. - * @param key the key of the value - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @param mapFunc the map function that defines how to map each value in map from {@link Value} to T - * @param the type of the values in the returned map - * @return the converted map found by the key or the default map if no such key exists. - */ - Map get( String key, Map defaultValue, Function mapFunc ); - - /** - * Retrieve the java integer with the given key. - * If no integer found by the key, then the default integer provided would be returned. - * @param key the key of the property - * @param defaultValue the default integer that would be returned if no integer found by the key in the map - * @return the integer found by the key or the default integer if no such key exists - */ - int get( String key, int defaultValue ); - - /** - * Retrieve the java long number with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the property - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the java long number found by the key or the default value if no such key exists - */ - long get( String key, long defaultValue ); - - /** - * Retrieve the java boolean with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the property - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the java boolean found by the key or the default value if no such key exists - */ - boolean get( String key, boolean defaultValue ); - - /** - * Retrieve the java string with the given key. - * If no string found by the key, then the default string provided would be returned. - * @param key the key of the property - * @param defaultValue the default string that would be returned if no string found by the key in the map - * @return the string found by the key or the default string if no such key exists - */ - String get( String key, String defaultValue ); - - /** - * Retrieve the java float number with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the property - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the java float number found by the key or the default value if no such key exists - */ - float get( String key, float defaultValue ); - - /** - * Retrieve the java double number with the given key. - * If no value found by the key, then the default value provided would be returned. - * @param key the key of the property - * @param defaultValue the default value that would be returned if no value found by the key in the map - * @return the java double number found by the key or the default value if no such key exists - */ - double get( String key, double defaultValue ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/Node.java b/src/graiph-driver/java/org/neo4j/driver/types/Node.java deleted file mode 100644 index 93500708..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/Node.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -/** - * The Node interface describes the characteristics of a node from a Neo4j graph. - * @since 1.0 - */ -public interface Node extends Entity -{ - /** - * Return all labels. - * - * @return a label Collection - */ - Iterable labels(); - - /** - * Test if this node has a given label - * - * @param label the label - * @return {@code true} if this node has the label otherwise {@code false} - */ - boolean hasLabel( String label ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/Path.java b/src/graiph-driver/java/org/neo4j/driver/types/Path.java deleted file mode 100644 index 9715eecd..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/Path.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import org.neo4j.driver.util.Immutable; - -/** - * A Path is a directed sequence of relationships between two nodes. This generally - * represents a traversal or walk through a graph and maintains a direction separate - * from that of any relationships traversed. - * - * It is allowed to be of size 0, meaning there are no relationships in it. In this case, - * it contains only a single node which is both the start and the end of the path. - * - *
- *     Path routeToStockholm = ..;
- *
- *     // Work with each segment of the path
- *     for( Segment segment : routeToStockholm )
- *     {
- *
- *     }
- * 
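A slightly fuller sketch of the traversal idiom shown in the snippet above, assuming a Path value already read from a query result; variable and class names are illustrative only.

    import org.neo4j.driver.types.Node;
    import org.neo4j.driver.types.Path;

    class PathSketch {
        // Walks the path segment by segment; each segment carries the traversal
        // direction, which may differ from the underlying relationship's own direction.
        static void printRoute(Path routeToStockholm) {
            for (Path.Segment segment : routeToStockholm) {
                System.out.println(segment.start().id()
                        + " -[" + segment.relationship().type() + "]-> "
                        + segment.end().id());
            }
            // Nodes come back in path order; a zero-length path still yields its single node.
            for (Node node : routeToStockholm.nodes()) {
                System.out.println("visits node " + node.id());
            }
        }
    }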
- * @since 1.0 - */ -@Immutable -public interface Path extends Iterable -{ - /** - * A segment combines a relationship in a path with a start and end node that describe the traversal direction - * for that relationship. This exists because the relationship has a direction between the two nodes that is - * separate and potentially different from the direction of the path. - * {@code - * Path: (n1)-[r1]->(n2)<-[r2]-(n3) - * Segment 1: (n1)-[r1]->(n2) - * Segment 2: (n2)<-[r2]-(n3) - * } - */ - interface Segment - { - /** @return the relationship underlying this path segment */ - Relationship relationship(); - - /** - * The node that this segment starts at. - * @return the start node - */ - Node start(); - - /** - * The node that this segment ends at. - * @return the end node - */ - Node end(); - } - - /** @return the start node of this path */ - Node start(); - - /** @return the end node of this path */ - Node end(); - - /** @return the number of segments in this path, which will be the same as the number of relationships */ - int length(); - - /** - * @param node the node to check for - * @return true if the specified node is contained in this path - */ - boolean contains( Node node ); - - /** - * @param relationship the relationship to check for - * @return true if the specified relationship is contained in this path - */ - boolean contains( Relationship relationship ); - - /** - * Create an iterable over the nodes in this path, nodes will appear in the same order as they appear - * in the path. - * - * @return an {@link java.lang.Iterable} of all nodes in this path - */ - Iterable nodes(); - - /** - * Create an iterable over the relationships in this path. The relationships will appear in the same order as they - * appear in the path. - * - * @return an {@link java.lang.Iterable} of all relationships in this path - */ - Iterable relationships(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/Point.java b/src/graiph-driver/java/org/neo4j/driver/types/Point.java deleted file mode 100644 index ad0c7b21..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/Point.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import org.neo4j.driver.Values; -import org.neo4j.driver.util.Immutable; - -/** - * Represents a single point in a particular coordinate reference system. - *

- * Value that represents a point can be created using {@link Values#point(int, double, double)} - * or {@link Values#point(int, double, double, double)} method. - */ -@Immutable -public interface Point -{ - /** - * Retrieve identifier of the coordinate reference system for this point. - * - * @return coordinate reference system identifier. - */ - int srid(); - - /** - * Retrieve {@code x} coordinate of this point. - * - * @return the {@code x} coordinate value. - */ - double x(); - - /** - * Retrieve {@code y} coordinate of this point. - * - * @return the {@code y} coordinate value. - */ - double y(); - - /** - * Retrieve {@code z} coordinate of this point. - * - * @return the {@code z} coordinate value or {@link Double#NaN} if not applicable. - */ - double z(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/Relationship.java b/src/graiph-driver/java/org/neo4j/driver/types/Relationship.java deleted file mode 100644 index 0a985848..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/Relationship.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -/** - * The Relationship interface describes the characteristics of a relationship from a Neo4j graph. - * @since 1.0 - */ -public interface Relationship extends Entity -{ - /** - * Id of the node where this relationship starts. - * @return the node id - */ - long startNodeId(); - - /** - * Id of the node where this relationship ends. - * @return the node id - */ - long endNodeId(); - - /** - * Return the type of this relationship. - * - * @return the type name - */ - String type(); - - /** - * Test if this relationship has the given type - * - * @param relationshipType the give relationship type - * @return {@code true} if this relationship has the given relationship type otherwise {@code false} - */ - boolean hasType( String relationshipType ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/Type.java b/src/graiph-driver/java/org/neo4j/driver/types/Type.java deleted file mode 100644 index 17b25d68..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/Type.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.types; - -import org.neo4j.driver.Value; -import org.neo4j.driver.util.Experimental; -import org.neo4j.driver.util.Immutable; - -/** - * The type of a {@link Value} as defined by the Cypher language - * @since 1.0 - */ -@Immutable -@Experimental -public interface Type -{ - /** - * @return the name of the Cypher type (as defined by Cypher) - */ - String name(); - - /** - * Test if the given value has this type - * - * @param value the value - * @return {@code true} if the value is a value of this type otherwise {@code false} - */ - boolean isTypeOf( Value value ); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/types/TypeSystem.java b/src/graiph-driver/java/org/neo4j/driver/types/TypeSystem.java deleted file mode 100644 index d90675ab..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/types/TypeSystem.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.types; - -import org.neo4j.driver.util.Experimental; -import org.neo4j.driver.util.Immutable; - -/** - * A listing of all database types this driver can handle. - * @since 1.0 - */ -@Immutable -@Experimental -public interface TypeSystem -{ - Type ANY(); - - Type BOOLEAN(); - - Type BYTES(); - - Type STRING(); - - Type NUMBER(); - - Type INTEGER(); - - Type FLOAT(); - - Type LIST(); - - Type MAP(); - - Type NODE(); - - Type RELATIONSHIP(); - - Type PATH(); - - Type POINT(); - - Type DATE(); - - Type TIME(); - - Type LOCAL_TIME(); - - Type LOCAL_DATE_TIME(); - - Type DATE_TIME(); - - Type DURATION(); - - Type NULL(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/util/Experimental.java b/src/graiph-driver/java/org/neo4j/driver/util/Experimental.java deleted file mode 100644 index 434ef5b2..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/util/Experimental.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.util; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotated elements are experimental and may change without deprecation across driver releases. - * @since 1.0 - */ -@Inherited -@Retention(RetentionPolicy.RUNTIME) -@Documented -@Target( { ElementType.TYPE, ElementType.METHOD } ) -public @interface Experimental -{ -} diff --git a/src/graiph-driver/java/org/neo4j/driver/util/Immutable.java b/src/graiph-driver/java/org/neo4j/driver/util/Immutable.java deleted file mode 100644 index 6cbeadd8..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/util/Immutable.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.util; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Indicates that instances of the annotated class or of its subclasses are immutable, i.e. - * do not provide any means of mutating their state - * @since 1.0 - */ -@Inherited -@Retention(RetentionPolicy.RUNTIME) -@Documented -@Target( { ElementType.TYPE } ) -public @interface Immutable -{ -} diff --git a/src/graiph-driver/java/org/neo4j/driver/util/Pair.java b/src/graiph-driver/java/org/neo4j/driver/util/Pair.java deleted file mode 100644 index 3d510e39..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/util/Pair.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.neo4j.driver.util; - -/** - * Immutable pair of a key and a value - * - * @param the Java type of the contained value - * @since 1.0 - */ -@Immutable -public interface Pair -{ - /** - * @return the property key - */ - K key(); - - /** - * @return the property value - */ - V value(); -} diff --git a/src/graiph-driver/java/org/neo4j/driver/util/Resource.java b/src/graiph-driver/java/org/neo4j/driver/util/Resource.java deleted file mode 100644 index 66039d36..00000000 --- a/src/graiph-driver/java/org/neo4j/driver/util/Resource.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.neo4j.driver.util; - -/** - * A Resource is an {@link AutoCloseable} that allows introspecting if it - * already has been closed through its {@link #isOpen()} method. - * - * Additionally, calling {@link AutoCloseable#close()} twice is expected to fail - * (i.e. is not idempotent). - * @since 1.0 - */ -public interface Resource extends AutoCloseable -{ - /** - * Detect whether this resource is still open - * - * @return true if the resource is open - */ - boolean isOpen(); - - /** - * @throws IllegalStateException if already closed - */ - @Override - void close(); -} diff --git a/src/test/resources/gNode.properties b/src/test/resources/gNode.properties deleted file mode 100644 index 0c345d4d..00000000 --- a/src/test/resources/gNode.properties +++ /dev/null @@ -1,5 +0,0 @@ -zkServerAddress=10.0.86.26:2181 -gNodeServiceAddress=159.226.193.204:7688 -sessionTimeout=20000 -connectionTimeout=10000 -registryPath=/gnodes \ No newline at end of file diff --git a/src/test/scala/cypher-plus/GraiphServerStarter.scala b/src/test/scala/cypher-plus/GraiphServerStarter.scala deleted file mode 100644 index 31d0a68e..00000000 --- a/src/test/scala/cypher-plus/GraiphServerStarter.scala +++ /dev/null @@ -1,12 +0,0 @@ -import java.io.File -import cn.graiph.server.GraiphServer - -/** - * Created by bluejoe on 2019/7/17. - */ -object GraiphServerStarter { - def main(args: Array[String]) { - GraiphServer.startServer(new File("./output/testdb"), - new File("./testdata/neo4j.conf")); - } -} diff --git a/src/test/scala/cypher-plus/LocalGraiphTest.scala b/src/test/scala/cypher-plus/LocalGraiphTest.scala deleted file mode 100644 index 6f638c00..00000000 --- a/src/test/scala/cypher-plus/LocalGraiphTest.scala +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -import java.io.{File, FileInputStream} - -import cn.graiph.blob.Blob -import org.apache.commons.io.IOUtils -import org.neo4j.graphdb.Node -import org.neo4j.kernel.impl.InstanceContext -import org.neo4j.kernel.impl.blob.BlobStorage -import org.scalatest.{BeforeAndAfter, FunSuite} - -import scala.collection.JavaConversions - -class LocalGraiphTest extends FunSuite with BeforeAndAfter with TestBase { - - before { - setupNewDatabase(); - } - - test("test blob R/W using API") { - //reload database - val db2 = openDatabase(); - val tx2 = db2.beginTx(); - //get first node - val it = db2.getAllNodes().iterator(); - val v1: Node = it.next(); - val v2: Node = it.next(); - - println(v1.getAllProperties); - assert(4 == v1.getAllProperties.size()); - - val blob = v1.getProperty("photo").asInstanceOf[Blob]; - - assert(new File("./testdata/test.png").length() == blob.length); - - assert(new File("./testdata/test.png").length() == blob.offerStream { - IOUtils.toByteArray(_).length - }); - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob.toBytes()); - - //test array[blob] - val blob2 = v1.getProperty("album").asInstanceOf[Array[Blob]]; - assert(6 == blob2.length); - - assert((0 to 5).toArray.map { x => - IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) - } - === - blob2.map { - _.offerStream { - IOUtils.toByteArray(_) - } - }); - - val blob3 = v2.getProperty("photo").asInstanceOf[Blob]; - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test1.png"))) === - blob3.toBytes()); - - tx2.close(); - db2.shutdown(); - } - - test("remove a blob property") { - val db2 = openDatabase(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 8); - - val tx2 = db2.beginTx(); - - //get first node - val it = db2.getAllNodes().iterator(); - val v1: Node = it.next(); - //delete one - v1.removeProperty("photo"); - - //should not be deleted now - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 8); - - tx2.success(); - tx2.close(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 7); - db2.shutdown(); - } - - test("remove a blob array property") { - val db2 = openDatabase(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 8); - - val tx2 = db2.beginTx(); - - //get first node - val it = db2.getAllNodes().iterator(); - val v1: Node = it.next(); - v1.removeProperty("album"); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 8); - tx2.success(); - tx2.close(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 2); - db2.shutdown(); - } - - test("set a blob property to other") { - val db2 = openDatabase(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 8); - - val tx2 = db2.beginTx(); - - //get first node - val it = db2.getAllNodes().iterator(); - val v1: Node = it.next(); - - v1.setProperty("album", 1); - v1.setProperty("photo", 1); - - tx2.success(); - tx2.close(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 1); - - db2.shutdown(); - } - - test("remove a record with blob properties") { - val db2 = openDatabase(); - 
val tx2 = db2.beginTx(); - - //get first node - val it = db2.getAllNodes().iterator(); - val v1: Node = it.next(); - - v1.delete(); - - tx2.success(); - tx2.close(); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 1); - - db2.shutdown(); - } - - test("test blob using Cypher query") { - //reload database - val db2 = openDatabase(); - val tx2 = db2.beginTx(); - - //cypher query - val r1 = db2.execute("match (n) where n.name='bob' return n.photo,n.name,n.age,n.album").next(); - assert("bob" === r1.get("n.name")); - assert(40 == r1.get("n.age")); - - val blob22 = r1.get("n.album").asInstanceOf[Array[Blob]]; - assert(6 == blob22.length); - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob22(0).toBytes()); - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob22(5).toBytes()); - - val blob1 = r1.get("n.photo").asInstanceOf[Blob]; - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob1.toBytes()); - - val blob3 = db2.execute("match (n) where n.name='alex' return n.photo").next() - .get("n.photo").asInstanceOf[Blob]; - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test1.png"))) === - blob3.toBytes()); - - tx2.success(); - tx2.close(); - db2.shutdown(); - } - - test("test blob using Cypher create") { - //reload database - val db2 = openDatabase(); - val tx2 = db2.beginTx(); - - db2.execute("CREATE (n {name:{NAME}})", - JavaConversions.mapAsJavaMap(Map("NAME" -> "张三"))); - - db2.execute("CREATE (n {name:{NAME}, photo:{BLOB_OBJECT}})", - JavaConversions.mapAsJavaMap(Map("NAME" -> "张三", "BLOB_OBJECT" -> Blob.EMPTY))); - - db2.execute("CREATE (n {name:{NAME}, photo:{BLOB_OBJECT}})", - JavaConversions.mapAsJavaMap(Map("NAME" -> "张三", "BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test1.png"))))); - - assert(3.toLong === db2.execute("match (n) where n.name=$NAME return count(n)", - JavaConversions.mapAsJavaMap(Map("NAME" -> "张三"))).next().get("count(n)")); - - val it2 = db2.execute("match (n) where n.name=$NAME return n.photo", - JavaConversions.mapAsJavaMap(Map("NAME" -> "张三"))); - - assert(null == - it2.next().get("n.photo")); - - assert(it2.next().get("n.photo") === Blob.EMPTY); - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test1.png"))) === - it2.next().get("n.photo").asInstanceOf[Blob].toBytes()); - - tx2.success(); - tx2.close(); - db2.shutdown(); - } -} diff --git a/src/test/scala/cypher-plus/MultiGraiphTest.scala b/src/test/scala/cypher-plus/MultiGraiphTest.scala deleted file mode 100644 index 8f480898..00000000 --- a/src/test/scala/cypher-plus/MultiGraiphTest.scala +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -import java.io.{File, FileInputStream} - -import cn.graiph.blob.Blob -import org.apache.commons.io.IOUtils -import org.neo4j.graphdb.{GraphDatabaseService, Node, Transaction} -import org.neo4j.kernel.impl.InstanceContext -import org.neo4j.kernel.impl.blob.BlobStorage -import org.scalatest.{BeforeAndAfter, FunSuite} - -/** - * Created by bluejoe on 2019/4/11. - */ -class MultiGraiphTest extends FunSuite with BeforeAndAfter with TestBase { - - before { - setupNewDatabase(); - } - - def testCreateBlob(db: GraphDatabaseService, name: String, photo: File): Transaction = { - val tx = db.beginTx(); - //create a node - val node1 = db.createNode(); - node1.setProperty("name", name); - //with a blob property - node1.setProperty("photo", Blob.fromFile(photo)); - - tx; - } - - def testQuery(db: GraphDatabaseService, file: File): Unit = { - val tx = db.beginTx(); - - //get first node - val it = db.getAllNodes().iterator(); - val v1: Node = it.next(); - - assert(false == it.hasNext); - assert(3 == v1.getAllProperties.size()); - - val blob = v1.getProperty("photo").asInstanceOf[Blob]; - assert(IOUtils.toByteArray(new FileInputStream(file)) === - blob.toBytes()); - - tx.success(); - tx.close(); - } - - test("test multiple db transaction") { - val db = openDatabase(); - assert(InstanceContext.of(db).get[BlobStorage].iterator().size == 8); - - val tx1 = testCreateBlob(db, "lawson", new File("./testdata/test.png")); - val tx2 = testCreateBlob(db, "alex", new File("./testdata/test1.png")); - - tx2.success(); - tx2.close(); - - assert(InstanceContext.of(db).get[BlobStorage].iterator().size == 8); - - tx1.success(); - tx1.close(); - - //top level transaction commit - assert(InstanceContext.of(db).get[BlobStorage].iterator().size == 10); - - db.shutdown() - } - - test("test multiple db instances") { - val testDbDir1 = new File("./testdata/testdb1/db"); - val testDbDir2 = new File("./testdata/testdb2/db"); - - setupNewDatabase(testDbDir1); - setupNewDatabase(testDbDir2); - - val db1 = openDatabase(testDbDir1); - val db2 = openDatabase(testDbDir2); - - assert(InstanceContext.of(db1).get[BlobStorage].iterator().size == 8); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 8); - - val tx1 = testCreateBlob(db1, "lawson", new File("./testdata/test.png")); - val tx2 = testCreateBlob(db2, "alex", new File("./testdata/test1.png")); - - tx2.success(); - tx2.close(); - - assert(InstanceContext.of(db1).get[BlobStorage].iterator().size == 8); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 9); - - tx1.success(); - tx1.close(); - - assert(InstanceContext.of(db1).get[BlobStorage].iterator().size == 9); - assert(InstanceContext.of(db2).get[BlobStorage].iterator().size == 9); - - db1.shutdown() - db2.shutdown() - } -} \ No newline at end of file diff --git a/src/test/scala/cypher-plus/RemoteGraiphTest.scala b/src/test/scala/cypher-plus/RemoteGraiphTest.scala deleted file mode 100644 index b1ae0668..00000000 --- a/src/test/scala/cypher-plus/RemoteGraiphTest.scala +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright (c) 2002-2019 "Neo4j," - * Neo4j Sweden AB [http://neo4j.com] - * - * This file is part of Neo4j. - * - * Neo4j is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -import java.io.{File, FileInputStream} -import java.net.URL - -import cn.graiph.blob.Blob -import cn.graiph.driver.RemoteGraiph -import cn.graiph.server.GraiphServer -import org.apache.commons.io.IOUtils -import org.neo4j.driver._ -import org.scalatest.{BeforeAndAfter, FunSuite} - -class RemoteGraiphTest extends FunSuite with BeforeAndAfter with TestBase { - - var server: GraiphServer = _; - - before { - setupNewDatabase(new File("./output/testdb/data/databases/graph.db")); - server = GraiphServer.startServer(testDbDir, new File(testConfPath)); - } - - after { - server.shutdown() - } - - test("test blob R/W via cypher") { - val conn = RemoteGraiph.connect("bolt://localhost:7687"); - //a non-blob - val (node, name, age) = conn.querySingleObject("match (n) where n.name='bob' return n, n.name, n.age", (result: Record) => { - (result.get("n").asNode(), result.get("n.name").asString(), result.get("n.age").asInt()) - }); - - assert("bob" === name); - assert(40 == age); - - val nodes = conn.queryObjects("match (n) return n", (result: Record) => { - result.get("n").asNode() - }); - - assert(2 == nodes.length); - - //blob - val blob0 = conn.querySingleObject("return Blob.empty()", (result: Record) => { - result.get(0).asBlob - }); - - assert(0 == blob0.length); - - conn.querySingleObject("return Blob.fromFile('./testdata/test.png')", (result: Record) => { - val blob1 = result.get(0).asBlob - assert(new File("./testdata/test.png").length() == blob1.toBytes().length); - blob1.offerStream(is => { - //remote input stream should be closed - is.read(); - }) - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob1.toBytes()); - 1; - }); - - var blob20: Blob = null; - - conn.querySingleObject("match (n) where n.name='bob' return n.photo,n.album,Blob.len(n.photo) as len", (result: Record) => { - val blob2 = result.get("n.photo").asBlob; - blob20 = blob2; - val album = result.get("n.album").asList(); - val len = result.get("len").asInt() - - assert(len == new File("./testdata/test.png").length()); - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob2.offerStream { - IOUtils.toByteArray(_) - }); - - assert(6 == album.size()); - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - album.get(0).asInstanceOf[Blob].offerStream { - IOUtils.toByteArray(_) - }); - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - album.get(5).asInstanceOf[Blob].offerStream { - IOUtils.toByteArray(_) - }); - }); - - //now, blob is unaccessible - val ex = - try { - blob20.offerStream { - IOUtils.toByteArray(_) - }; - - false; - } - catch { - case _ => true; - } - - assert(ex); - - conn.querySingleObject("match (n) where n.name='alex' return n.photo", (result: Record) => { - val blob3 = result.get("n.photo").asBlob - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test1.png"))) === - blob3.offerStream { - IOUtils.toByteArray(_) - }); - }); - - //query with parameters - val blob4 = conn.querySingleObject("match (n) where n.name={NAME} return n.photo", - Map("NAME" -> "bob"), (result: Record) => 
{ - result.get("n.photo").asBlob - }); - - //commit new records - conn.executeUpdate("CREATE (n {name:{NAME}})", - Map("NAME" -> "张三")); - - conn.executeUpdate("CREATE (n {name:{NAME}, photo:{BLOB_OBJECT}})", - Map("NAME" -> "张三", "BLOB_OBJECT" -> Blob.EMPTY)); - - conn.executeUpdate("CREATE (n {name:{NAME}, photo:{BLOB_OBJECT}})", - Map("NAME" -> "张三", "BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test1.png")))); - - conn.executeQuery("return {BLOB_OBJECT}", - Map("BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test.png")))); - - conn.querySingleObject("return {BLOB_OBJECT}", - Map("BLOB_OBJECT" -> Blob.fromFile(new File("./testdata/test.png"))), (result: Record) => { - val blob = result.get(0).asBlob - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob.offerStream { - IOUtils.toByteArray(_) - }); - - }); - } - - test("test blob R/W via blob literal") { - val conn = RemoteGraiph.connect("bolt://localhost:7687"); - - //blob - val blob0 = conn.querySingleObject("return ", (result: Record) => { - result.get(0).asBlob - }); - - assert(0 == blob0.length); - - //blob - val blob01 = conn.querySingleObject("return ", (result: Record) => { - result.get(0).asBlob - }); - - assert("this is an example".getBytes() === blob01.toBytes()); - - //test localfile - conn.querySingleObject("return ", (result: Record) => { - val blob1 = result.get(0).asBlob - - assert(IOUtils.toByteArray(new FileInputStream(new File("./testdata/test.png"))) === - blob1.toBytes()); - }); - - //test http - conn.querySingleObject("return ", (result: Record) => { - val blob2 = result.get(0).asBlob - - assert(IOUtils.toByteArray(new URL("http://img.zcool.cn/community/049f6b5674911500000130b7f00a87.jpg")) === - blob2.toBytes()); - }); - - //test https - val blob3 = conn.querySingleObject("return ", (result: Record) => { - result.get(0).asBlob.toBytes() - }); - - assert(IOUtils.toByteArray(new URL("https://avatars0.githubusercontent.com/u/2328905?s=460&v=4")) === - blob3); - - assert(conn.querySingleObject("return Blob.len()", (result: Record) => { - result.get(0).asInt - }) == new File("./testdata/test.png").length()); - } -} diff --git a/src/test/scala/cypher-plus/SemOpTest.scala b/src/test/scala/cypher-plus/SemOpTest.scala deleted file mode 100644 index 0d2d9737..00000000 --- a/src/test/scala/cypher-plus/SemOpTest.scala +++ /dev/null @@ -1,109 +0,0 @@ -import java.io.File - -import org.apache.commons.io.FileUtils -import org.junit.{Assert, Test} - -class SemOpTest extends TestBase { - @Test - def testLike(): Unit = { - //create a new database - val db = openDatabase(); - val tx = db.beginTx(); - - Assert.assertEquals(true, db.execute("return Blob.empty() ~:0.5 Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); - Assert.assertEquals(true, db.execute("return Blob.empty() ~:0.5 Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); - Assert.assertEquals(true, db.execute("return Blob.empty() ~:1.0 Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); - - Assert.assertEquals(true, db.execute("return Blob.empty() ~: Blob.empty() as r").next().get("r").asInstanceOf[Boolean]); - - Assert.assertEquals(true, db.execute( - """return Blob.fromFile('./testdata/mayun1.jpeg') - ~: Blob.fromFile('./testdata/mayun2.jpeg') as r""") - .next().get("r").asInstanceOf[Boolean]); - - Assert.assertEquals(false, db.execute( - """return Blob.fromFile('./testdata/mayun1.jpeg') - ~: Blob.fromFile('./testdata/lqd.jpeg') as r""") - .next().get("r").asInstanceOf[Boolean]); - - 
Assert.assertEquals(true, db.execute("""return Blob.fromFile('./testdata/car1.jpg') ~: '.*NB666.*' as r""") - .next().get("r").asInstanceOf[Boolean]); - - tx.success(); - tx.close(); - db.shutdown(); - } - - @Test - def testCompare(): Unit = { - //create a new database - val db = openDatabase(); - val tx = db.beginTx(); - - try { - Assert.assertEquals(1.toLong, db.execute("return 1 :: 2 as r").next().get("r")); - Assert.assertTrue(false); - } - catch { - case _: Throwable => Assert.assertTrue(true); - } - - Assert.assertEquals(true, db.execute("return :: as r").next().get("r").asInstanceOf[Double] > 0.7); - Assert.assertEquals(true, db.execute("return :: as r").next().get("r").asInstanceOf[Double] > 0.6); - Assert.assertEquals(true, db.execute("return '杜 一' :: '杜一' > 0.6 as r").next().get("r")); - Assert.assertEquals(true, db.execute("return '杜 一' ::jaro '杜一' > 0.6 as r").next().get("r")); - - db.execute("return '杜 一' ::jaro '杜一','Zhihong SHEN' ::levenshtein 'SHEN Z.H'"); - - tx.success(); - tx.close(); - db.shutdown(); - } - - @Test - def testCustomProperty1(): Unit = { - //create a new database - val db = openDatabase(); - val tx = db.beginTx(); - - Assert.assertEquals(new File("./testdata/car1.jpg").length(), - db.execute("""return Blob.fromFile('./testdata/car1.jpg')->length as x""") - .next().get("x")); - - Assert.assertEquals("image/jpeg", db.execute("""return Blob.fromFile('./testdata/car1.jpg')->mime as x""") - .next().get("x")); - - Assert.assertEquals(500, db.execute("""return Blob.fromFile('./testdata/car1.jpg')->width as x""") - .next().get("x")); - - Assert.assertEquals(333, db.execute("""return Blob.fromFile('./testdata/car1.jpg')->height as x""") - .next().get("x")); - - Assert.assertEquals(333, db.execute("""return ->height as x""") - .next().get("x")); - - Assert.assertEquals(null, db.execute("""return Blob.fromFile('./testdata/car1.jpg')->notExist as x""") - .next().get("x")); - - tx.success(); - tx.close(); - db.shutdown(); - } - - @Test - def testCustomProperty2(): Unit = { - //create a new database - val db = openDatabase(); - val tx = db.beginTx(); - - Assert.assertEquals("苏E730V7", db.execute("""return Blob.fromFile('./testdata/car1.jpg')->plateNumber as r""") - .next().get("r")); - - Assert.assertEquals("我今天早上吃了两个包子", db.execute("""return Blob.fromFile('./testdata/test.wav')->message as r""") - .next().get("r").asInstanceOf[Boolean]); - - tx.success(); - tx.close(); - db.shutdown(); - } -} \ No newline at end of file diff --git a/src/test/scala/cypher-plus/TestBase.scala b/src/test/scala/cypher-plus/TestBase.scala deleted file mode 100644 index b5f03fd6..00000000 --- a/src/test/scala/cypher-plus/TestBase.scala +++ /dev/null @@ -1,47 +0,0 @@ -import java.io.File - -import cn.graiph.blob.Blob -import cn.graiph.db.GraiphDB -import org.apache.commons.io.FileUtils -import org.neo4j.graphdb.GraphDatabaseService - -/** - * Created by bluejoe on 2019/4/13. 
- */ -trait TestBase { - val testDbDir = new File("./output/testdb"); - val testConfPath = new File("./testdata/neo4j.conf").getPath; - - def setupNewDatabase(dbdir: File = testDbDir, conf: String = testConfPath): Unit = { - FileUtils.deleteDirectory(dbdir); - //create a new database - val db = openDatabase(dbdir, conf); - val tx = db.beginTx(); - //create a node - val node1 = db.createNode(); - - node1.setProperty("name", "bob"); - node1.setProperty("age", 40); - - //with a blob property - node1.setProperty("photo", Blob.fromFile(new File("./testdata/test.png"))); - //blob array - node1.setProperty("album", (0 to 5).map(x => Blob.fromFile(new File("./testdata/test.png"))).toArray); - - val node2 = db.createNode(); - node2.setProperty("name", "alex"); - //with a blob property - node2.setProperty("photo", Blob.fromFile(new File("./testdata/test1.png"))); - node2.setProperty("age", 10); - - //node2.createRelationshipTo(node1, RelationshipType.withName("dad")); - - tx.success(); - tx.close(); - db.shutdown(); - } - - def openDatabase(dbdir: File = testDbDir, conf: String = testConfPath): GraphDatabaseService = { - GraiphDB.openDatabase(dbdir.getAbsoluteFile.getCanonicalFile, new File(conf).getAbsoluteFile.getCanonicalFile) - } -} diff --git a/src/test/scala/external-properties/CreatePipeQueryTest.scala b/src/test/scala/external-properties/CreatePipeQueryTest.scala deleted file mode 100644 index cbbe5c7f..00000000 --- a/src/test/scala/external-properties/CreatePipeQueryTest.scala +++ /dev/null @@ -1,52 +0,0 @@ - -import java.io.File - -import org.junit.{Assert, Before, Test} -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.graphdb.{Label, RelationshipType} -import org.neo4j.io.fs.FileUtils -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder, InMemoryPropertyNodeStore, LoggingPropertiesStore, Settings} - - -trait CreateQueryTestBase { - Settings._hookEnabled = false; - - @Before - def initdb(): Unit = { - new File("./output/testdb").mkdirs(); - FileUtils.deleteRecursively(new File("./output/testdb")); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - db.shutdown(); - } - - protected def testQuery(query: String): Unit = { - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - val tx = db.beginTx(); - val rs = db.execute(query); - while (rs.hasNext) { - val row = rs.next(); - println(row); - } - tx.success(); - tx.close() - db.shutdown(); - } -} - -class CreateNodeQueryTest extends CreateQueryTestBase { - Settings._hookEnabled = true; - val tmpns = new InMemoryPropertyNodeStore() - CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(tmpns)); - - @Test - def test1(): Unit = { - Assert.assertEquals(0, tmpns.nodes.size) - testQuery("CREATE (n:Person { name:'test01', age:10}) RETURN n.name, id(n)"); - testQuery("CREATE (n:Person { name:'test02', age:20}) RETURN n.name, id(n)"); - testQuery("CREATE (n:Person { name:'test03', age:20}) RETURN n.name, id(n)"); - testQuery("MATCH (n) RETURN n.name"); - Assert.assertEquals(3, tmpns.nodes.size) - } - - -} diff --git a/src/test/scala/external-properties/CreateQueryTest.scala b/src/test/scala/external-properties/CreateQueryTest.scala deleted file mode 100644 index 72f107d6..00000000 --- a/src/test/scala/external-properties/CreateQueryTest.scala +++ /dev/null @@ -1,135 +0,0 @@ -import java.io.File - -import org.junit.{After, Before, Test} -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.graphdb.{GraphDatabaseService, 
Label, RelationshipType, Result} -import org.neo4j.io.fs.FileUtils -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder, InMemoryPropertyNodeStore, LoggingPropertiesStore, Settings} - - -class CreateQueryTest { - Settings._hookEnabled = true - val tmpns = new InMemoryPropertyNodeStore() - CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(tmpns)) - - var db: GraphDatabaseService = null - - @Before - def initdb(): Unit = { - new File("./output/testdb").mkdirs() - FileUtils.deleteRecursively(new File("./output/testdb")) - db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - //db.shutdown() - } - - @After - def closeDb(): Unit = { - db.shutdown() - } - - protected def testQuery(queryStr: String): Result = { - //val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - val tx = db.beginTx() - val rs = db.execute(queryStr) - tx.success() - tx.close() - rs - } - - - protected def assertResultRowsCount(rowsCount: Int, queryStr: String="match (n) return n"): Unit = { - // val result = testQuery(queryStr) - var size = 0 - val tx = db.beginTx() - val rs = db.execute(queryStr) - while (rs.hasNext) { - val row = rs.next() - println(row) - size += 1 - } - tx.success() - tx.close() - assert(rowsCount == size) - } - - - @Test - def test1(): Unit = { - // create one node - testQuery("create (n) return n") - testQuery("create (n) return n") - assertResultRowsCount(2) - } - - @Test - def test2(): Unit = { - // create multiple nodes - testQuery("create (n),(m) return n,m") - testQuery("create (n1),(n2),(n3) return n1,n2,n3") - assertResultRowsCount(5) - } - - @Test - def test3(): Unit = { - // create node with labels - testQuery("create (n:Person) return labels(n)") - testQuery("create (n:Person:Man) return labels(n)") - testQuery("create (n:Person:Man:Doc) return labels(n)") - assertResultRowsCount(3, "match (n:Person) return n") - assertResultRowsCount(2, "match (n:Man) return n") - assertResultRowsCount(1, "match (n:Doc) return n") - } - - @Test - def test4(): Unit = { - // create node with labels and properties - testQuery("create (n:Person{name:'andy',age:12}) return n.name,n.age") - testQuery("create (n:Person{name:'bob',age:12}) return n.name,n.age") - assertResultRowsCount(2, "match (n:Person) return n") - assertResultRowsCount(1, "match (n:Person) where n.name='bob' return n") - assertResultRowsCount(2, "match (n:Person) where n.age=12 return n") - } - - @Test - def test5(): Unit = { - // create relationship - testQuery("create (n:Person{name:'A',age:12}) return n.name,n.age") - testQuery("create (n:Person{name:'B',age:12}) return n.name,n.age") - val queryStr = - """ - |MATCH (a:Person),(b:Person) - |WHERE a.name = 'A' AND b.name = 'B' - |CREATE (a)-[r:RELTYPE]->(b) - |RETURN type(r) - """.stripMargin - assertResultRowsCount(1, queryStr ) - } - - @Test - def test6(): Unit = { - // create relationship and set properties - testQuery("create (n:Person{name:'A',age:12}) return n.name,n.age") - testQuery("create (n:Person{name:'B',age:10}) return n.name,n.age") - val queryStr = - """ - |MATCH (a:Person),(b:Person) - |WHERE a.name = 'A' AND b.name = 'B' - |CREATE (a)-[r:RELTYPE { name: "friend"}]->(b) - |RETURN type(r), r.name - """.stripMargin - testQuery(queryStr) - assertResultRowsCount(1, "match (a)-[r:RELTYPE { name: 'friend'}]->(b) return type(r), r.name" ) - } - - @Test - def test7(): Unit = { - // Create a full path - val queryStr = - """ - |CREATE p =(andy { name:'Andy' })-[:WORKS_AT]->(neo)<-[:WORKS_AT]-(michael { 
name: 'Michael' }) - |RETURN p - """.stripMargin - assertResultRowsCount(1, queryStr ) - } - -} \ No newline at end of file diff --git a/src/test/scala/external-properties/DeletePipeQueryTest.scala b/src/test/scala/external-properties/DeletePipeQueryTest.scala deleted file mode 100644 index 3ddad3d5..00000000 --- a/src/test/scala/external-properties/DeletePipeQueryTest.scala +++ /dev/null @@ -1,71 +0,0 @@ - -import java.io.File - -import org.junit.{Assert, Before, Test} -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.graphdb.{Label, RelationshipType} -import org.neo4j.io.fs.FileUtils -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder, InMemoryPropertyNodeStore, LoggingPropertiesStore, Settings} - - -trait DeletePipeQueryTestBase { - Settings._hookEnabled = false; - - @Before - def initdb(): Unit = { - new File("./output/testdb").mkdirs(); - FileUtils.deleteRecursively(new File("./output/testdb")); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - - val tx = db.beginTx(); - //create a node - val node1 = db.createNode(); - - node1.setProperty("name", "test01"); - node1.setProperty("age", 10); - node1.addLabel(new Label { - override def name(): String = "man" - }) - - val node2 = db.createNode(); - node2.setProperty("name", "test02"); - node2.setProperty("age", 40); - node2.addLabel(new Label { - override def name(): String = "man" - }) - tx.success() - tx.close() - db.shutdown(); - } - - protected def testQuery(query: String): Unit = { - initdb(); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - val tx = db.beginTx(); - val rs = db.execute(query); - while (rs.hasNext) { - val row = rs.next(); - println(row); - } - tx.success(); - tx.close() - db.shutdown(); - } -} - -class DeletePipeQueryTest extends DeletePipeQueryTestBase { - Settings._hookEnabled = true; - val tmpns = new InMemoryPropertyNodeStore() - CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(tmpns)); - - - @Test - def test1(): Unit = { - - Assert.assertEquals(2, tmpns.nodes.size) - testQuery("MATCH (n) WHERE 18>n.age DELETE n RETURN n.name"); - Assert.assertEquals(1, tmpns.nodes.size) - - } - -} diff --git a/src/test/scala/external-properties/GNodeListTest.scala b/src/test/scala/external-properties/GNodeListTest.scala deleted file mode 100644 index 2efbc8ac..00000000 --- a/src/test/scala/external-properties/GNodeListTest.scala +++ /dev/null @@ -1,42 +0,0 @@ -import java.io.FileInputStream -import java.net.InetAddress -import java.util.Properties - -import cn.graiph.cnode.{NodeAddress, ZKServiceRegistry} -import org.junit.{Assert, Test} -class GNodeListTest { - - @Test - def testProperties(): Unit = { - val path = Thread.currentThread().getContextClassLoader.getResource("gNode.properties").getPath; - val prop = new Properties() - prop.load(new FileInputStream(path)) - Assert.assertEquals("10.0.86.26:2181",prop.getProperty("zkServerAddress")) - Assert.assertEquals("159.226.193.204:7688",prop.getProperty("gNodeServiceAddress")) - Assert.assertEquals("20000",prop.getProperty("sessionTimeout")) - } - - @Test - def testGetLocalIP(): Unit = { - val localhostIP = InetAddress.getLocalHost().getHostAddress() - Assert.assertEquals("159.226.193.204",localhostIP) - } - - @Test - def testCreateNodeAddress(): Unit = { - val str = "10.0.88.99:1234" - val nodeAddress = NodeAddress.fromString(str) - println(nodeAddress) - } - - @Test - def testGetReadList(): Unit ={ - registerAsReadNode() - } - - def registerAsReadNode(): 
Unit ={ - val register = new ZKServiceRegistry() - register.registry("read","10.0.88.99:1111") - } - -} \ No newline at end of file diff --git a/src/test/scala/external-properties/MatchQueryTest.scala b/src/test/scala/external-properties/MatchQueryTest.scala deleted file mode 100644 index 9c2e16c7..00000000 --- a/src/test/scala/external-properties/MatchQueryTest.scala +++ /dev/null @@ -1,98 +0,0 @@ -import java.io.File - -import org.junit._ -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.graphdb.{GraphDatabaseService, Label, RelationshipType, Result} -import org.neo4j.io.fs.FileUtils -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder, InMemoryPropertyNodeStore, LoggingPropertiesStore, Settings} - - -class MatchQueryTest { - Settings._hookEnabled = true - val tmpns = new InMemoryPropertyNodeStore() - CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(tmpns)) - - var db: GraphDatabaseService = null - - @Before - def initdb(): Unit = { - new File("./output/testdb").mkdirs() - FileUtils.deleteRecursively(new File("./output/testdb")) - db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - //db.shutdown() - val tx = db.beginTx() - - val queryStr = - """ - |CREATE (m1:Movie{title:"Wall Street"}),(m2:Movie{title:"The American President"}), - |(p1:Person{name: 'Oliver Stone'}),(p2:Person{name: 'Michael Douglas'}), - |(p3:Person{name: 'Charlie Sheen'}),(p4:Person{name: 'Martin Sheen'}), - |(p5:Person{name: 'Rob Reiner'}) - |WITH m1,m2,p1,p2,p3,p4,p5 - |CREATE (p1)-[:DIRECTED]->(b) - |CREATE (p2)-[:ACTED_IN{role: 'Gordon Gekko'}]->(m1) - |CREATE (p2)-[:ACTED_IN{role: 'President Andrew Shepherd'}]->(m1) - |CREATE (p3)-[:ACTED_IN{role: 'Bud Fox'}]->(m1) - |CREATE (p4)-[:ACTED_IN{role: 'Carl Fox'}]->(m1) - |CREATE (p4)-[:ACTED_IN{role: 'A.J. 
MacInerney'}]->(m2) - |CREATE (p5)-[:DIRECTED]->(m2) - |""".stripMargin - db.execute(queryStr) - tx.success() - tx.close() - } - - @After - def closeDb(): Unit = { - db.shutdown() - } - - - protected def assertResultRowsCount(rowsCount: Int, queryStr: String="match (n) return n"): Unit = { - // val result = testQuery(queryStr) - var size = 0 - val tx = db.beginTx() - val rs = db.execute(queryStr) - while (rs.hasNext) { - val row = rs.next() - println(row) - size += 1 - } - tx.success() - tx.close() - assert(rowsCount == size) - } - - - @Test - def test1(): Unit = { - // Get all nodes - assertResultRowsCount(6) - } - - @Test - def test2(): Unit = { - // Get all nodes with a label - assertResultRowsCount(2, "MATCH (movie:Movie) RETURN movie.title") - } - - @Test - def test3(): Unit = { - // Get nodes without label - - //fixme: this will be throw ClassCastException from InMemoryPropertyNodeStore.NFEquals - assertResultRowsCount(1, "MATCH (movie{title:'Wall Street'}) RETURN movie.title") - - } - - @Test - def test4(): Unit = { - // Get nodes with filter - - //fixme: this will be throw ClassCastException from InMemoryPropertyNodeStore.NFEquals - assertResultRowsCount(1, "MATCH (movie) where movie.title='Wall Street' RETURN movie") - } - - - -} \ No newline at end of file diff --git a/src/test/scala/external-properties/NodeByLabelScanPipeQueryTest.scala b/src/test/scala/external-properties/NodeByLabelScanPipeQueryTest.scala deleted file mode 100644 index 74860603..00000000 --- a/src/test/scala/external-properties/NodeByLabelScanPipeQueryTest.scala +++ /dev/null @@ -1,84 +0,0 @@ - -import java.io.File - -import org.junit.{Assert, Before, Test} -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.graphdb.{Label, RelationshipType} -import org.neo4j.io.fs.FileUtils -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder, InMemoryPropertyNodeStore, LoggingPropertiesStore, Settings} - - -trait NodeByLabelScanPipeQueryTestBase { - Settings._hookEnabled = false; - - @Before - def initdb(): Unit = { - new File("./output/testdb").mkdirs(); - FileUtils.deleteRecursively(new File("./output/testdb")); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - - val tx = db.beginTx(); - //create a node - val node1 = db.createNode(); - - node1.setProperty("name", "test01"); - node1.setProperty("age", 10); - node1.addLabel(new Label { - override def name(): String = "man" - }) - - val node2 = db.createNode(); - node2.setProperty("name", "test02"); - node2.setProperty("age", 40); - node2.addLabel(new Label { - override def name(): String = "person" - }) - - val node3 = db.createNode(); - node3.setProperty("name", "test03"); - node3.setProperty("age", 40); - node3.addLabel(new Label { - override def name(): String = "person" - }) - - tx.success() - tx.close() - db.shutdown(); - } - - protected def testQuery(query: String): Int = { - initdb(); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - val tx = db.beginTx(); - val rs = db.execute(query); - var count = 0 - while (rs.hasNext) { - count += 1 - val row = rs.next(); - println(row); - } - tx.success(); - tx.close() - db.shutdown(); - count - } -} - -class NodeByLabelScanPipeQueryTest extends NodeByLabelScanPipeQueryTestBase { - Settings._hookEnabled = true; - val tmpns = new InMemoryPropertyNodeStore() - CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(tmpns)); - - - @Test - def test1(): Unit = { - - val rows1 = testQuery("MATCH (n: man) RETURN n.name"); - 
Assert.assertEquals(1, rows1) - - val rows2 = testQuery("MATCH (n: person) RETURN n.name"); - Assert.assertEquals(2, rows2) - - } - -} diff --git a/src/test/scala/external-properties/QueryTest.scala b/src/test/scala/external-properties/QueryTest.scala deleted file mode 100644 index 1df2888c..00000000 --- a/src/test/scala/external-properties/QueryTest.scala +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Created by bluejoe on 2019/9/15. - */ - -import java.io.File - -import org.junit.{Before, Test} -import org.neo4j.graphdb.factory.GraphDatabaseFactory -import org.neo4j.graphdb.{Label, RelationshipType} -import org.neo4j.io.fs.FileUtils -import org.neo4j.kernel.impl.{CustomPropertyNodeStoreHolder, InMemoryPropertyNodeStore, LoggingPropertiesStore, Settings} - -class QueryTest extends QueryTestBase { - Settings._hookEnabled = false; - - @Test - def test1(): Unit = { - testQuery("match (m)-[dad]->(n) where 18>m.age return n.name, m"); - } - - @Test - def test2(): Unit = { - testQuery("match (m)-[dad]->(x)-[brother]-(n) where m.age<18 and n.age>30 return n.name, m.name, x"); - } -} - -class QueryWithinSolrTest extends QueryTestBase { - Settings._hookEnabled = true; - CustomPropertyNodeStoreHolder.hold(new LoggingPropertiesStore(new InMemoryPropertyNodeStore())); - - @Test - def test1(): Unit = { - testQuery("match (m)-[dad]->(n) where 18>m.age return n.name, m"); - //testQuery("match (m)-[dad]->(n) where m.name=~ '.*lue.*' return m.name, m"); - //testQuery("match (m)-[dad]->(n) where m.age>18 return n.name, m"); - //testQuery("match (m) where m.age<>39 return m"); - //testQuery("match (m) where m.name=~ '.*ue.*' return m"); - //testQuery("MATCH (n {name:'bluejoe'}) SET n:professional set n.age=45 SET n.worktime=20 RETURN n"); - //testQuery("MATCH (n {name:'bluejoe'}) SET n={age:20}"); - //testQuery("match (m) where m.name=~ '(?i)B.*' return m") - } - - // test beforeCommit update nodes - @Test - def test2(): Unit = { - testQuery("match (n) return n.name") - } -} - -trait QueryTestBase { - Settings._hookEnabled = false; - - @Before - def initdb(): Unit = { - new File("./output/testdb").mkdirs(); - FileUtils.deleteRecursively(new File("./output/testdb")); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - - val tx = db.beginTx(); - //create a node - val node1 = db.createNode(); - - node1.setProperty("name", "bluejoe"); - node1.setProperty("age", 40); - node1.addLabel(new Label { - override def name(): String = "man" - }) - - val node2 = db.createNode(); - node2.setProperty("name", "alex"); - //with a blob property - node2.setProperty("age", 10); - node2.addLabel(new Label { - override def name(): String = "kid" - }) - - val node3 = db.createNode(); - - node3.setProperty("name", "alan"); - node3.setProperty("age", 39); - node3.addLabel(new Label { - override def name(): String = "man" - }) - - node2.createRelationshipTo(node1, new RelationshipType { - override def name(): String = "dad" - }); - - node3.createRelationshipTo(node1, new RelationshipType { - override def name(): String = "brother" - }); - - // test beforeCommit update properties - val node4 = db.createNode() - node4.setProperty("name", "test1") - node4.setProperty("name", "updated test1 name") - - tx.success(); - tx.close(); - db.shutdown(); - } - - protected def testQuery(query: String): Unit = { - initdb(); - val db = new GraphDatabaseFactory().newEmbeddedDatabase(new File("./output/testdb")) - val tx = db.beginTx(); - val rs = db.execute(query); - while (rs.hasNext) { - val row = rs.next(); - 
println(row); - } - - tx.success(); - db.shutdown(); - } -} \ No newline at end of file diff --git a/tools/pom.xml b/tools/pom.xml new file mode 100644 index 00000000..13edf9ab --- /dev/null +++ b/tools/pom.xml @@ -0,0 +1,33 @@ + + + + parent + cn.pandadb + 0.0.2 + + 4.0.0 + + cn.pandadb + tools + + + + cn.pandadb + java-driver + ${pandadb.version} + + + cn.pandadb + server + ${pandadb.version} + + + cn.pandadb + aipm-library + ${pandadb.version} + + + + \ No newline at end of file diff --git a/tools/src/main/java/cn/pandadb/tool/JUnsafePNodeLauncher.java b/tools/src/main/java/cn/pandadb/tool/JUnsafePNodeLauncher.java new file mode 100644 index 00000000..e2e8259a --- /dev/null +++ b/tools/src/main/java/cn/pandadb/tool/JUnsafePNodeLauncher.java @@ -0,0 +1,17 @@ +package cn.pandadb.tool; + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 10:56 2019/12/26 + * @Modified By: + */ +public class JUnsafePNodeLauncher { + public static void main(String[] args) { + String num = args[0]; + String dbPathStr = "../itest/output/testdb/db" + num; + String confFilePathStr = "../itest/testdata/localnode" + num + ".conf"; + String[] startArgs = {dbPathStr, confFilePathStr}; + PNodeServerStarter.main(startArgs); + } +} diff --git a/src/graiph-database/scala/cn/graiph/driver/connector.scala b/tools/src/main/scala/cn/pandadb/connector/BoltService.scala similarity index 97% rename from src/graiph-database/scala/cn/graiph/driver/connector.scala rename to tools/src/main/scala/cn/pandadb/connector/BoltService.scala index bfdbe82b..d3d914c7 100644 --- a/src/graiph-database/scala/cn/graiph/driver/connector.scala +++ b/tools/src/main/scala/cn/pandadb/connector/BoltService.scala @@ -1,11 +1,11 @@ -package cn.graiph.driver +package cn.pandadb.driver import org.neo4j.driver._ import scala.collection.JavaConversions import scala.collection.JavaConversions._ import scala.reflect.ClassTag -import cn.graiph.util.Logging +import cn.pandadb.util.Logging class BoltService(url: String, user: String = "", pass: String = "") extends Logging with CypherService { diff --git a/src/graiph-database/scala/cn/graiph/driver/CypherService.scala b/tools/src/main/scala/cn/pandadb/connector/CypherService.scala similarity index 95% rename from src/graiph-database/scala/cn/graiph/driver/CypherService.scala rename to tools/src/main/scala/cn/pandadb/connector/CypherService.scala index 207e44ce..61b225f3 100644 --- a/src/graiph-database/scala/cn/graiph/driver/CypherService.scala +++ b/tools/src/main/scala/cn/pandadb/connector/CypherService.scala @@ -1,6 +1,6 @@ -package cn.graiph.driver +package cn.pandadb.driver -import cn.graiph.util.Logging +import cn.pandadb.util.Logging import org.neo4j.driver.{Record, Session, StatementResult} import scala.reflect.ClassTag diff --git a/src/graiph-database/scala/cn/graiph/driver/RemoteGraiph.scala b/tools/src/main/scala/cn/pandadb/connector/RemotePanda.scala similarity index 65% rename from src/graiph-database/scala/cn/graiph/driver/RemoteGraiph.scala rename to tools/src/main/scala/cn/pandadb/connector/RemotePanda.scala index 89784fc1..e0141bf3 100644 --- a/src/graiph-database/scala/cn/graiph/driver/RemoteGraiph.scala +++ b/tools/src/main/scala/cn/pandadb/connector/RemotePanda.scala @@ -1,11 +1,11 @@ -package cn.graiph.driver +package cn.pandadb.driver -import cn.graiph.util.Logging +import cn.pandadb.util.Logging /** * Created by bluejoe on 2019/7/17. 
*/ -object RemoteGraiph extends Logging { +object RemotePanda extends Logging { def connect(url: String, user: String = "", pass: String = ""): CypherService = { new BoltService(url, user, pass); } diff --git a/src/graiph-database/scala/cn/graiph/db/connector.scala b/tools/src/main/scala/cn/pandadb/connector/local.scala similarity index 95% rename from src/graiph-database/scala/cn/graiph/db/connector.scala rename to tools/src/main/scala/cn/pandadb/connector/local.scala index febddceb..0c3fab30 100644 --- a/src/graiph-database/scala/cn/graiph/db/connector.scala +++ b/tools/src/main/scala/cn/pandadb/connector/local.scala @@ -1,10 +1,10 @@ -package cn.graiph.db +package cn.pandadb.db import java.util.function.Function import java.util.stream.Stream -import cn.graiph.driver.CypherService -import cn.graiph.util.Logging +import cn.pandadb.driver.CypherService +import cn.pandadb.util.Logging import org.neo4j.driver._ import org.neo4j.driver.internal.types.InternalMapAccessorWithDefaultValue import org.neo4j.driver.internal.value.{NodeValue, RelationshipValue} @@ -18,7 +18,6 @@ import scala.collection.JavaConversions._ class LocalGraphService(db: GraphDatabaseService) extends Logging with CypherService { - override def queryObjects[T: ClassManifest](queryString: String, fnMap: (Record) => T): Iterator[T] = ??? override def execute[T](f: (Session) => T): T = { throw new UnsupportedOperationException(); @@ -117,10 +116,12 @@ class LocalGraphService(db: GraphDatabaseService) override def list[T](mapFunction: Function[Record, T]): java.util.List[T] = list().map(mapFunction.apply(_)) override def single(): Record = - if (hasNext) + if (hasNext) { null; - else + } + else { next(); + } override def stream(): Stream[Record] = { //TODO @@ -128,4 +129,7 @@ class LocalGraphService(db: GraphDatabaseService) } } + override def queryObjects[T: ClassManifest](queryString: String, fnMap: (Record) => T): Iterator[T] = { + null + } } \ No newline at end of file diff --git a/tools/src/main/scala/cn/pandadb/tool/PNodeServerStarter.scala b/tools/src/main/scala/cn/pandadb/tool/PNodeServerStarter.scala new file mode 100644 index 00000000..fd4a851f --- /dev/null +++ b/tools/src/main/scala/cn/pandadb/tool/PNodeServerStarter.scala @@ -0,0 +1,35 @@ +package cn.pandadb.tool + +import java.io.File + +import cn.pandadb.server.PNodeServer +import cn.pandadb.util.GlobalContext + +/** + * Created by bluejoe on 2019/7/17. + */ +object PNodeServerStarter { + def main(args: Array[String]) { + if (args.length != 2) { + sys.error(s"Usage:\r\n"); + sys.error(s"GNodeServerStarter \r\n"); + } + + PNodeServer.startServer(new File(args(0)), + new File(args(1))); + } +} + +object WatchDogStarter { + def main(args: Array[String]) { + if (args.length != 2) { + sys.error(s"Usage:\r\n"); + sys.error(s"WatchDogStarter \r\n"); + } + + GlobalContext.setWatchDog(true); + //FIXME: redundant storedir + PNodeServer.startServer(new File(args(0)), + new File(args(1))); + } +} \ No newline at end of file diff --git a/tools/src/main/scala/cn/pandadb/tool/UnsafeCleaner.scala b/tools/src/main/scala/cn/pandadb/tool/UnsafeCleaner.scala new file mode 100644 index 00000000..ff79de55 --- /dev/null +++ b/tools/src/main/scala/cn/pandadb/tool/UnsafeCleaner.scala @@ -0,0 +1,10 @@ +package cn.pandadb.tool + +/** + * Created by bluejoe on 2019/11/21. 
+ */ +object UnsafeCleaner { + def clean(zkHosts: String): Unit = { + + } +} diff --git a/tools/src/main/scala/cn/pandadb/tool/UnsafeImporter.scala b/tools/src/main/scala/cn/pandadb/tool/UnsafeImporter.scala new file mode 100644 index 00000000..54d6f89c --- /dev/null +++ b/tools/src/main/scala/cn/pandadb/tool/UnsafeImporter.scala @@ -0,0 +1,13 @@ +package cn.pandadb.tool + +import java.io.File + +/** + * Created by bluejoe on 2019/11/21. + */ +object UnsafeImporter { + + def importData(zkHosts: String, csv: File): Unit = { + + } +} diff --git a/tools/src/main/scala/cn/pandadb/tool/UnsafePNodeLauncher.scala b/tools/src/main/scala/cn/pandadb/tool/UnsafePNodeLauncher.scala new file mode 100644 index 00000000..a30cb3ac --- /dev/null +++ b/tools/src/main/scala/cn/pandadb/tool/UnsafePNodeLauncher.scala @@ -0,0 +1,15 @@ +package cn.pandadb.tool + +/** + * @Author: Airzihao + * @Description: + * @Date: Created in 22:27 2019/12/25 + * @Modified By: + */ +object UnsafePNodeLauncher { + def main(args: Array[String]): Unit = { + val num = args(0) + PNodeServerStarter.main(Array(s"./itest/output/testdb/db${num}", + s"./itest/testdata/localnode${num}.conf")) + } +} diff --git a/tools/src/test/scala/UnsafePNodeLauncherTest.scala b/tools/src/test/scala/UnsafePNodeLauncherTest.scala new file mode 100644 index 00000000..253f54b8 --- /dev/null +++ b/tools/src/test/scala/UnsafePNodeLauncherTest.scala @@ -0,0 +1,46 @@ +import java.io.{File, FileInputStream} +import java.util.Properties + +import cn.pandadb.network.NodeAddress +import org.junit.Test + +import sys.process._ + +/** + * @Author: Airzihao + * @Description: + * @Date: Created at 20:04 2019/12/25 + * @Modified By: + */ +class UnsafePNodeLauncherTest { + + @Test + def test1(): Unit = { + val num = 0 + val startCmd = s"cmd.exe /c mvn exec:java -Dexec.mainClass='cn.pandadb.tool.UnsafePNodeLauncher' -Dexec.args=${num}" !!; + val confFile = new File(s"./itest/testdata/localnode${num}.conf") + Thread.sleep(999999) + } + +} + +import sys.process._ +class PandaDBTestBase { + var serialNum = 0 + + def startLocalPNodeServer(): NodeAddress = { + val startCmd = s"cmd.exe /c mvn exec:java -Dexec.mainClass='cn.pandadb.tool.UnsafePNodeLauncher' -Dexec.args=${serialNum}" + startCmd !!; + val localNodeAddress = _getLocalNodeAddressFromFile(new File(s"./itest/testdata/localnode${serialNum}.conf")) + serialNum += 1; + Thread.sleep(100000) + localNodeAddress + } + + // For base test's use. + private def _getLocalNodeAddressFromFile(confFile: File): NodeAddress = { + val props = new Properties() + props.load(new FileInputStream(confFile)) + NodeAddress.fromString(props.getProperty("node.server.address")) + } +}
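Note on the renamed tooling introduced above: the connector formerly exposed as cn.graiph.driver.RemoteGraiph is now cn.pandadb.driver.RemotePanda, and server startup moves to cn.pandadb.server.PNodeServer (wrapped by PNodeServerStarter, which takes a store directory and a conf file). A minimal usage sketch follows, assuming PNodeServer.startServer mirrors the old GraiphServer.startServer (returning a handle with shutdown()) and that the server listens on the default Bolt port 7687 as in the removed RemoteGraiphTest; the paths, the "alice" literal, and the PandaQuickStart object name are illustrative only.

import java.io.File
import org.neo4j.driver.Record
import cn.pandadb.server.PNodeServer
import cn.pandadb.driver.RemotePanda

object PandaQuickStart {
  def main(args: Array[String]): Unit = {
    // start a local PNode server, the same call PNodeServerStarter makes with its two arguments
    // (assumption: startServer returns a server handle with shutdown(), as GraiphServer did)
    val server = PNodeServer.startServer(new File("./output/testdb"), new File("./testdata/neo4j.conf"))

    // connect over Bolt and map a single record, the pattern the removed RemoteGraiphTest used
    val conn = RemotePanda.connect("bolt://localhost:7687")
    val count = conn.querySingleObject("match (n) return count(n)",
      (record: Record) => record.get("count(n)").asLong())
    println(s"node count: $count")

    // parameterized update, also part of the CypherService API exercised by the old tests
    conn.executeUpdate("CREATE (n {name:{NAME}})", Map("NAME" -> "alice"))

    server.shutdown()
  }
}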