[SPARK-23078] add tests for Spark Thrift Server on Kubernetes #38
Open

ozzieba wants to merge 12 commits into apache-spark-on-k8s:master from ozzieba:master
Commits (12)
0e7a4ed  add test for spark thrift server on kubernetes
b43d137  fixed some dependency issues
66b1771  dynamic port
befc2cc  eventually
0236305  fix dependencies, again
95b6b55  build spark with hive and thrift
528b064  merge master
72d79e4  removed duplicate dep
0b5f09e  changed remaining PP_LOCATOR_LABEL
4c8fe2f  addressed comments
36dee22  spacing
cef7f5e  resolved conflicts
KubernetesSuite.scala

@@ -20,10 +20,12 @@ import java.io.File
 import java.nio.file.{Path, Paths}
 import java.util.UUID
 import java.util.regex.Pattern
+import java.sql.DriverManager

 import scala.collection.JavaConverters._
 import com.google.common.io.PatternFilenameFilter
 import io.fabric8.kubernetes.api.model.{Container, Pod}
+import org.apache.hive.jdbc.HiveDriver
 import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuite}
 import org.scalatest.concurrent.{Eventually, PatienceConfiguration}
 import org.scalatest.time.{Minutes, Seconds, Span}

@@ -121,6 +123,10 @@ private[spark] class KubernetesSuite extends FunSuite with BeforeAndAfterAll wit
     runSparkPiAndVerifyCompletion(appArgs = Array("5"))
   }

+  test("Run Spark Thrift Server") {
+    runThriftServerAndVerifyQuery()
+  }
+
   test("Run SparkPi with custom labels, annotations, and environment variables.") {
     sparkAppConf
       .set("spark.kubernetes.driver.label.label1", "label1-value")

@@ -239,6 +245,46 @@ private[spark] class KubernetesSuite extends FunSuite with BeforeAndAfterAll wit
       appLocator)
   }

+  private def runThriftServerAndVerifyQuery(
+    driverPodChecker: Pod => Unit = doBasicDriverPodCheck,
+    appArgs: Array[String] = Array.empty[String],
+    appLocator: String = appLocator): Unit = {
+    val appArguments = SparkAppArguments(
+      mainAppResource = "",
+      mainClass = "org.apache.spark.sql.hive.thriftserver.HiveThriftServer2",
+      appArgs = appArgs)
+    SparkAppLauncher.launch(appArguments, sparkAppConf, TIMEOUT.value.toSeconds.toInt, sparkHomeDir)
+    val driverPod = kubernetesTestComponents.kubernetesClient
+      .pods
+      .withLabel("spark-app-locator", appLocator)
+      .withLabel("spark-role", "driver")
+      .list()
+      .getItems
+      .get(0)
+    driverPodChecker(driverPod)
+    val driverPodResource = kubernetesTestComponents.kubernetesClient
+      .pods
+      .withName(driverPod.getMetadata.getName)
+
+    Eventually.eventually(TIMEOUT, INTERVAL) {
+      val localPort = driverPodResource.portForward(10000).getLocalPort
+      val jdbcUri = s"jdbc:hive2://localhost:$localPort/"
+      val connection = DriverManager.getConnection(jdbcUri, "user", "pass")
+      val statement = connection.createStatement()
+      try {
+        val resultSet = statement.executeQuery("select 42")
+        resultSet.next()
+        assert(resultSet.getInt(1) == 42)
+      } finally {
+        try {
+          statement.close()
+        } finally {
+          connection.close()
+        }
+      }
+    }
+  }
+
   private def runSparkApplicationAndVerifyCompletion(
       appResource: String,
       mainClass: String,

Inline review comment on the parameter lines of runThriftServerAndVerifyQuery: "These should be 4 spaces indention."
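For context on what the new helper exercises: the core of the verification is a Hive JDBC round trip over a Kubernetes port-forward, wrapped in ScalaTest's Eventually.eventually(TIMEOUT, INTERVAL) so the whole block is retried until the Thrift Server inside the driver pod is accepting connections. Below is a minimal standalone sketch of the same pattern, assuming the fabric8 client; the pod name and the user/pass credentials are placeholders (the suite instead resolves the driver pod via the spark-app-locator and spark-role labels):

import java.sql.DriverManager

import io.fabric8.kubernetes.client.DefaultKubernetesClient

object ThriftServerSmokeCheck {
  def main(args: Array[String]): Unit = {
    // Placeholder pod name; the integration test looks the driver pod up by labels.
    val driverPodName = "spark-thrift-server-driver"

    val client = new DefaultKubernetesClient()
    // Forward an ephemeral local port to HiveServer2's default port 10000 in the pod.
    val portForward = client.pods().withName(driverPodName).portForward(10000)
    try {
      // Make sure the Hive JDBC driver is registered before opening the connection.
      Class.forName("org.apache.hive.jdbc.HiveDriver")
      val jdbcUri = s"jdbc:hive2://localhost:${portForward.getLocalPort}/"
      // With HiveServer2's default (NONE) authentication, any user/password is accepted.
      val connection = DriverManager.getConnection(jdbcUri, "user", "pass")
      val statement = connection.createStatement()
      try {
        val resultSet = statement.executeQuery("select 42")
        resultSet.next()
        assert(resultSet.getInt(1) == 42)
      } finally {
        // Mirror the test's nested cleanup: close the statement, then the connection.
        try {
          statement.close()
        } finally {
          connection.close()
        }
      }
    } finally {
      portForward.close()
      client.close()
    }
  }
}

Port 10000 is the Thrift Server default, which is why the test forwards that fixed remote port and lets the client pick the local one (the "dynamic port" commit above).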
Conversations

Reviewer: Why do you need to depend on hadoop-common?

ozzieba: Without it I get java.lang.ClassNotFoundException: org.apache.hadoop.conf.Configuration. In general, I believe Hive JDBC requires some Hadoop dependencies, as discussed in e.g. https://issues.apache.org/jira/browse/HIVE-15110. Not sure if it's only configuration. Do you have a cleaner solution?

Reviewer: Ah, thanks for the info.
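For anyone hitting the same error: the upshot of the exchange above is that hive-jdbc alone is not enough, because the Hive JDBC client needs org.apache.hadoop.conf.Configuration on the classpath at runtime. A sketch of the corresponding dependency declarations, written in sbt syntax for illustration; the group and artifact IDs are the standard Maven coordinates, but the version numbers are illustrative and not the ones this PR pins in its own build:

// build.sbt (illustrative versions only)
libraryDependencies ++= Seq(
  // JDBC driver the test uses to talk to the Spark Thrift Server.
  "org.apache.hive" % "hive-jdbc" % "1.2.1" % Test,
  // Needed at runtime for org.apache.hadoop.conf.Configuration (see HIVE-15110);
  // without it the connection attempt fails with ClassNotFoundException.
  "org.apache.hadoop" % "hadoop-common" % "2.7.3" % Test
)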