[SPARK-46432][BUILD] Upgrade Netty to 4.1.104.Final

GitHub Actions / Report test results: failed on Dec 17, 2023 in 0s

37861 tests run, 737 skipped, 158 failed.

Annotations

Check failure on line 40 in ArrowUtilsSuite

ArrowUtilsSuite.(It is not a test it is a sbt.testing.SuiteSelector)

sbt.ForkMain$ForkError: java.lang.NoSuchFieldError: chunkSize
Raw output
sbt.ForkMain$ForkError: sbt.ForkMain$ForkError: java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.util.ArrowUtilsSuite.roundtrip(ArrowUtilsSuite.scala:33)
	at org.apache.spark.sql.util.ArrowUtilsSuite.roundtrip(ArrowUtilsSuite.scala:35)
	at org.apache.spark.sql.util.ArrowUtilsSuite.$anonfun$new$1(ArrowUtilsSuite.scala:40)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
	at org.scalatest.enablers.Timed$$anon$1.timeoutAfter(Timed.scala:127)
	at org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:282)
	at org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
	at org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
	at org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
	at org.apache.spark.SparkFunSuite.$anonfun$test$2(SparkFunSuite.scala:155)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:226)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:227)
	at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:224)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:236)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:236)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:218)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:69)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:69)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:269)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:269)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:268)
	at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1564)
	at org.scalatest.Suite.run(Suite.scala:1114)
	at org.scalatest.Suite.run$(Suite.scala:1096)
	at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1564)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:273)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
	at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:273)
	at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:272)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:69)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:69)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:321)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:517)
	at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:414)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
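
Every failure in this run bottoms out in the same linkage error. Arrow's PooledByteBufAllocatorL$InnerAllocator is declared in the io.netty.buffer package so it can read PooledByteBufAllocator's package-private chunkSize field directly; that field appears to have been removed in Netty 4.1.104.Final, so the field reference compiled into the Arrow jar no longer links, and the JVM throws NoSuchFieldError the first time ArrowUtils$ (and, through it, Arrow's BaseAllocator) is initialized. Below is a minimal diagnostic sketch, assuming only that netty-buffer is on the classpath; the class and field names are taken from the trace above.

// Minimal probe for the field Arrow's InnerAllocator links against.
// Prints a clear message instead of letting the incompatibility surface
// as a NoSuchFieldError deep inside a test.
import io.netty.buffer.PooledByteBufAllocator;

public class NettyChunkSizeProbe {
    public static void main(String[] args) {
        try {
            // Present in older Netty releases, gone in 4.1.104.Final.
            PooledByteBufAllocator.class.getDeclaredField("chunkSize");
            System.out.println("chunkSize is still declared; Arrow's allocator can link.");
        } catch (NoSuchFieldException e) {
            System.out.println("chunkSize is gone from this Netty version; "
                + "PooledByteBufAllocatorL will throw NoSuchFieldError.");
        }
    }
}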

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_archive

Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0) (localhost executor driver): java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 132, in test_add_archive
    self.check_add_archive(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 129, in check_add_archive
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], "hello world!")
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0) (localhost executor driver): java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
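
The Class.forName frames above show how Arrow picks its buffer AllocationManager reflectively at first use: DefaultAllocationManagerOption.getFactory loads DefaultAllocationManagerFactory, whose static initializer constructs the Netty-backed NettyAllocationManager. A hedged mitigation sketch, and not the fix this PR needs: Arrow also ships an Unsafe-based manager, selectable through its arrow.allocation.manager.type system property, which would sidestep the Netty-backed allocator entirely. The sketch only illustrates the selection mechanism; whether the Unsafe manager is acceptable for Spark is a separate question.

// Hedged mitigation sketch (assumes Arrow's documented
// arrow.allocation.manager.type property; not the actual fix for this PR).
// The property must be set before any org.apache.arrow.memory class is
// initialized, because the factory choice is cached in a static initializer.
public class ArrowAllocatorWorkaround {
    public static void main(String[] args) {
        // Steer Arrow away from the Netty-backed NettyAllocationManager
        // whose constructor trips NoSuchFieldError: chunkSize.
        System.setProperty("arrow.allocation.manager.type", "Unsafe");
        // ... create the Spark session / Arrow allocators afterwards ...
    }
}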

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_file

Job aborted due to stage failure: Task 1 in stage 1.0 failed 1 times, most recent failure: Lost task 1.0 in stage 1.0 (TID 5) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 160, in test_add_file
    self.check_add_file(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 157, in check_add_file
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], "Hello world!!")
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 1 in stage 1.0 failed 1 times, most recent failure: Lost task 1.0 in stage 1.0 (TID 5) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
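
Note the shift in the top-level error: in test_add_archive the first task to touch ArrowUtils$ (TID 0) dies with the raw NoSuchFieldError, while test_add_file above (and test_add_pyfile below) fail with NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$. That is standard JVM behavior, not a second bug: once a static initializer throws, the class is marked erroneous, and every subsequent reference fails with NoClassDefFoundError, with the "[in thread ...]" suffix recording where the original <clinit> failed. A self-contained sketch of the cascade:

// First use of Fragile fails with ExceptionInInitializerError; every
// later use fails with NoClassDefFoundError ("Could not initialize
// class ..."), which is why the later artifact tests never see the
// chunkSize error directly.
public class ClinitCascade {
    static class Fragile {
        static {
            if (true) throw new RuntimeException("stand-in for NoSuchFieldError: chunkSize");
        }
        static void use() {}
    }
    public static void main(String[] args) {
        for (int i = 0; i < 2; i++) {
            try {
                Fragile.use();
            } catch (Throwable t) {
                System.out.println("attempt " + i + ": " + t);
            }
        }
    }
}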

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_pyfile

Job aborted due to stage failure: Task 2 in stage 2.0 failed 1 times, most recent failure: Lost task 2.0 in stage 2.0 (TID 10) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 52, in test_add_pyfile
    self.check_add_pyfile(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 49, in check_add_pyfile
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], 10)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 2 in stage 2.0 failed 1 times, most recent failure: Lost task 2.0 in stage 2.0 (TID 10) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
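
Editor's note (diagnosis; not part of the CI output): every failure above bottoms out in the same static-initializer chain: the first Arrow conversion on an executor touches org.apache.spark.sql.util.ArrowUtils$, whose <clinit> builds Arrow's default allocator, which loads arrow-memory-netty's PooledByteBufAllocatorL -- a class deliberately placed in the io.netty.buffer package so it can read Netty's package-private internals. The NoSuchFieldError: chunkSize says the Netty 4.1.104.Final now on the classpath no longer declares the chunkSize field that the bundled Arrow build was compiled against (which exact class lost the field is an inference from the trace, not confirmed here). A minimal probe, as a hedged Scala sketch, that checks the two plausible owners of the field:

object NettyChunkSizeProbe {
  // Walks the class hierarchy because the field may be declared on a
  // superclass rather than on the candidate class itself.
  private def hasField(className: String, field: String): Boolean = {
    var c: Class[_] = Class.forName(className)
    while (c != null) {
      if (c.getDeclaredFields.exists(_.getName == field)) return true
      c = c.getSuperclass
    }
    false
  }

  def main(args: Array[String]): Unit = {
    // Candidate owners are guesses inferred from the stack trace above.
    Seq("io.netty.buffer.PooledByteBufAllocator", "io.netty.buffer.PoolArena")
      .foreach { cn =>
        println(s"$cn (or a superclass) declares chunkSize: ${hasField(cn, "chunkSize")}")
      }
  }
}

Run against the same Netty jar the build resolves; "false" for both candidates means any Arrow compiled against the old field layout will fail exactly as above.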

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_zipped_package

Job aborted due to stage failure: Task 2 in stage 3.0 failed 1 times, most recent failure: Lost task 2.0 in stage 3.0 (TID 14) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 98, in test_add_zipped_package
    self.check_add_zipped_package(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 95, in check_add_zipped_package
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], 5)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 2 in stage 3.0 failed 1 times, most recent failure: Lost task 2.0 in stage 3.0 (TID 14) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 0.0 (TID 0)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
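
Editor's note (not part of the CI output): because ArrowUtils$ is a Scala object, the initializer runs once and its failure is then cached by the JVM -- which is why one trace per run reports ExceptionInInitializerError "[in thread ...]" while every later task reports NoClassDefFoundError: Could not initialize class. A hedged sketch that reproduces the failure without running a Spark job, using only the class name taken verbatim from the traces:

object ArrowUtilsInitProbe {
  def main(args: Array[String]): Unit = {
    try {
      // initialize = true forces the same <clinit> the executors hit.
      Class.forName("org.apache.spark.sql.util.ArrowUtils$", true, getClass.getClassLoader)
      println("ArrowUtils$ initialized cleanly")
    } catch {
      case e: Throwable =>
        // With the broken classpath, expect ExceptionInInitializerError
        // caused by NoSuchFieldError: chunkSize.
        println(s"initialization failed: $e")
    }
  }
}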

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_archive

Job aborted due to stage failure: Task 1 in stage 0.0 failed 4 times, most recent failure: Lost task 1.3 in stage 0.0 (TID 6) (localhost executor 0): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 132, in test_add_archive
    self.check_add_archive(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 129, in check_add_archive
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], "hello world!")
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 1 in stage 0.0 failed 4 times, most recent failure: Lost task 1.3 in stage 0.0 (TID 6) (localhost executor 0): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
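
Editor's note (possible mitigation, not a fix; not part of the CI output): the traces show Arrow selecting its allocator through DefaultAllocationManagerOption.getFactory, which, in the Arrow versions I am aware of, consults the arrow.allocation.manager.type system property before reflectively loading a factory. Steering it to the Unsafe manager would avoid the Netty-backed path entirely -- assuming that Arrow version honors the property and that the matching arrow-memory-unsafe artifact is on the driver and executor classpath, neither of which a stock Spark build guarantees. A hedged sketch:

object ArrowUnsafeAllocatorWorkaround {
  def main(args: Array[String]): Unit = {
    // Must be set before anything touches ArrowUtils$ / BaseAllocator;
    // once the static initializer has failed, the JVM will not retry it.
    System.setProperty("arrow.allocation.manager.type", "Unsafe")
    Class.forName("org.apache.spark.sql.util.ArrowUtils$")
  }
}

Presumably the actual resolution for this upgrade is an Arrow release built against Netty 4.1.104.Final (or deferring the Netty bump); the property above only sidesteps the incompatible allocator.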

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_file

Job aborted due to stage failure: Task 1 in stage 1.0 failed 4 times, most recent failure: Lost task 1.3 in stage 1.0 (TID 18) (localhost executor 1): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 160, in test_add_file
    self.check_add_file(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 157, in check_add_file
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], "Hello world!!")
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 1 in stage 1.0 failed 4 times, most recent failure: Lost task 1.3 in stage 1.0 (TID 18) (localhost executor 1): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
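
Every Java trace above bottoms out in the same link-time failure: io.netty.buffer.PooledByteBufAllocatorL, the allocator shim that arrow-memory-netty deliberately declares inside Netty's own package so it can reach allocator internals, was compiled against an older Netty in which a chunkSize field existed. Under Netty 4.1.104.Final that field is evidently no longer declared where the shim's bytecode expects it, so field resolution fails at run time with java.lang.NoSuchFieldError: chunkSize. A minimal stand-in for this binary-incompatibility pattern (hypothetical LibArena/Client classes, not Netty's actual source):

    // LibArena.java, version A: what the client is compiled against.
    public class LibArena {
        public int chunkSize = 4 << 20;           // field exists
    }

    // LibArena.java, version B: the field is relocated, as in the Netty upgrade.
    public class LibArena {
        private int chunk = 4 << 20;
        public int chunkSize() { return chunk; }  // accessor instead of a field
    }

    // Client.java: compile against version A, then run against version B.
    //   javac LibArena.java Client.java      (LibArena.java is version A)
    //   javac LibArena.java                  (recompile only the lib as version B)
    //   java Client                          (fails: java.lang.NoSuchFieldError naming chunkSize)
    public class Client {
        public static void main(String[] args) {
            // The field reference sits in Client.class's constant pool and is only
            // resolved at run time; javac would have caught this, the JVM linker throws.
            System.out.println(new LibArena().chunkSize);
        }
    }

Recompiling the client against version B would fail at compile time instead; the Error arises only in the compile-against-A, run-against-B mismatch, which is exactly the situation of a prebuilt arrow-memory-netty jar running on the upgraded Netty.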

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_pyfile

Job aborted due to stage failure: Task 2 in stage 2.0 failed 4 times, most recent failure: Lost task 2.3 in stage 2.0 (TID 34) (localhost executor 1): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 52, in test_add_pyfile
    self.check_add_pyfile(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 49, in check_add_pyfile
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], 10)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 2 in stage 2.0 failed 4 times, most recent failure: Lost task 2.3 in stage 2.0 (TID 34) (localhost executor 1): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 1.0 in stage 0.0 (TID 1)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
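
One detail makes these reports noisier than they are: only the first task to touch Arrow in a given executor JVM hits the real java.lang.NoSuchFieldError. The JVM then marks org.apache.spark.sql.util.ArrowUtils$ (and the Arrow classes beneath it) as erroneous and never re-runs their static initializers, so all four retries of each task, and every later stage, see java.lang.NoClassDefFoundError: Could not initialize class instead, with the original error and its thread quoted inside the nested ExceptionInInitializerError (the "[in thread ...]" suffix above). A self-contained sketch of that first-attempt/retry asymmetry; plain Java, nothing Spark- or Netty-specific, with a RuntimeException standing in for the field error so the first attempt is wrapped the same way:

    public class InitFailureDemo {
        static class Holder {
            // Stand-in for ArrowUtils$.<clinit> failing during allocator setup.
            static final int CHUNK = init();
            private static int init() {
                throw new RuntimeException("simulated NoSuchFieldError: chunkSize");
            }
        }

        public static void main(String[] args) {
            for (int attempt = 1; attempt <= 2; attempt++) {
                try {
                    System.out.println(Holder.CHUNK);
                } catch (Throwable t) {
                    // attempt 1: java.lang.ExceptionInInitializerError
                    // attempt 2: java.lang.NoClassDefFoundError:
                    //            Could not initialize class InitFailureDemo$Holder
                    System.out.println("attempt " + attempt + ": " + t);
                }
            }
        }
    }

This is also why failures in stages 2.0 and 3.0 below still cite tasks from stage 0.0 in their caused-by chains: each executor replays the initialization error it recorded on its very first Arrow touch.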

Check failure on line 1 in python/pyspark/sql/tests/connect/client/test_artifact.py

python/pyspark/sql/tests/connect/client/test_artifact.py.test_add_zipped_package

Job aborted due to stage failure: Task 1 in stage 3.0 failed 4 times, most recent failure: Lost task 1.3 in stage 3.0 (TID 49) (localhost executor 0): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 98, in test_add_zipped_package
    self.check_add_zipped_package(self.spark)
  File "/__w/spark/spark/python/pyspark/sql/tests/connect/client/test_artifact.py", line 95, in check_add_zipped_package
    self.assertEqual(spark_session.range(1).select(func("id")).first()[0], 5)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 479, in first
    return self.head()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 1 in stage 3.0 failed 4 times, most recent failure: Lost task 1.3 in stage 3.0 (TID 49) (localhost executor 0): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	... 20 more

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.3 in stage 0.0 (TID 7)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
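
All of the failures in this report reduce to a single binary-compatibility break. arrow-memory-netty ships io.netty.buffer.PooledByteBufAllocatorL, a class compiled into Netty's own package so it can read package-private allocator state; the frame at PooledByteBufAllocatorL.java:153 is a read of a chunkSize field on the pooled allocator's arenas. The NoSuchFieldError: chunkSize means that field no longer resolves against Netty 4.1.104.Final, which reworked the arena size-class layout, and since the JVM resolves field references lazily, the break only surfaces the first time the allocator is constructed. The probe below is a minimal, self-contained sketch (not part of Spark or Arrow; the exact Netty-internal layout is inferred from the trace) that checks whether the netty-buffer on the classpath still resolves the field, using a superclass walk that approximates the JVM's getfield resolution:

import java.lang.reflect.Field;

public class ChunkSizeProbe {
  // Approximate JVM field resolution for a getfield: search the named class,
  // then its superclasses, for a declared field with the given name.
  private static Field resolve(Class<?> start, String name) {
    for (Class<?> c = start; c != null; c = c.getSuperclass()) {
      try {
        return c.getDeclaredField(name);
      } catch (NoSuchFieldException ignored) {
        // keep walking up the hierarchy
      }
    }
    return null;
  }

  public static void main(String[] args) throws Exception {
    Class<?> arena = Class.forName("io.netty.buffer.PoolArena");
    Field f = resolve(arena, "chunkSize");
    if (f != null) {
      System.out.println("chunkSize resolves via " + f.getDeclaringClass().getName()
          + "; arrow-memory-netty's allocator should link");
    } else {
      System.out.println("chunkSize does not resolve on PoolArena; expect"
          + " java.lang.NoSuchFieldError: chunkSize from PooledByteBufAllocatorL");
    }
  }
}

Run against the Netty release Spark used before this upgrade, the field should resolve (on a size-class ancestor of PoolArena); against 4.1.104.Final it should not, which is consistent with every trace in this report.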

Check failure on line 1 in python/pyspark/tests/test_memory_profiler.py

@github-actions / Report test results

python/pyspark/tests/test_memory_profiler.py.test_profile_pandas_function_api

An error occurred while calling o173.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 2.0 failed 1 times, most recent failure: Lost task 0.0 in stage 2.0 (TID 4) (localhost executor driver): java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:992)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2428)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2449)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2468)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2493)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1047)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:408)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1046)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:448)
	at org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.$anonfun$executeCollect$1(AdaptiveSparkPlanExec.scala:390)
	at org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.withFinalPlanUpdate(AdaptiveSparkPlanExec.scala:418)
	at org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.executeCollect(AdaptiveSparkPlanExec.scala:390)
	at org.apache.spark.sql.Dataset.$anonfun$collectToPython$1(Dataset.scala:4258)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4432)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4430)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:4430)
	at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:4255)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	... 1 more
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/tests/test_memory_profiler.py", line 113, in test_profile_pandas_function_api
    api()
  File "/__w/spark/spark/python/pyspark/tests/test_memory_profiler.py", line 170, in exec_grouped_map
    df.groupby("id").applyInPandas(grouped_map, schema="id long, v double").collect()
  File "/__w/spark/spark/python/pyspark/sql/dataframe.py", line 1374, in collect
    sock_info = self._jdf.collectToPython()
  File "/__w/spark/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/java_gateway.py", line 1322, in __call__
    return_value = get_return_value(
  File "/__w/spark/spark/python/pyspark/errors/exceptions/captured.py", line 213, in deco
    return f(*a, **kw)
  File "/__w/spark/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/protocol.py", line 326, in get_return_value
    raise Py4JJavaError(
py4j.protocol.Py4JJavaError: An error occurred while calling o173.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 2.0 failed 1 times, most recent failure: Lost task 0.0 in stage 2.0 (TID 4) (localhost executor driver): java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:992)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2428)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2449)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2468)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2493)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1047)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:408)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1046)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:448)
	at org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.$anonfun$executeCollect$1(AdaptiveSparkPlanExec.scala:390)
	at org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.withFinalPlanUpdate(AdaptiveSparkPlanExec.scala:418)
	at org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.executeCollect(AdaptiveSparkPlanExec.scala:390)
	at org.apache.spark.sql.Dataset.$anonfun$collectToPython$1(Dataset.scala:4258)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4432)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4430)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:4430)
	at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:4255)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.NoSuchFieldError: chunkSize
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	... 1 more
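
Reading these annotations: each failure appears twice, once as the summarized Py4JJavaError and once under Raw output with the Python-side traceback that led to it. Within the Java portion, the trace printed before "Driver stacktrace:" is the executor-side failure and is the part that matters; the frames after "Driver stacktrace:" only record where the DAGScheduler aborted the job on the driver, and a trailing "... 1 more" is the JVM eliding frames already shown in the enclosing trace.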

Check failure on line 1 in python/pyspark/tests/test_memory_profiler.py

@github-actions / Report test results

python/pyspark/tests/test_memory_profiler.py.test_profile_pandas_udf

An error occurred while calling o235.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 3 in stage 0.0 failed 1 times, most recent failure: Lost task 3.0 in stage 0.0 (TID 3) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonWithNamedArgumentRunner.<init>(ArrowPythonRunner.scala:103)
	at org.apache.spark.sql.execution.python.ArrowEvalPythonEvaluatorFactory.evaluate(ArrowEvalPythonExec.scala:121)
	at org.apache.spark.sql.execution.python.EvalPythonEvaluatorFactory$EvalPythonPartitionEvaluator.eval(EvalPythonEvaluatorFactory.scala:112)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2(EvalPythonExec.scala:77)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2$adapted(EvalPythonExec.scala:76)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2(RDD.scala:878)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2$adapted(RDD.scala:878)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 2.0 (TID 4)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	... 17 more

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:992)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2428)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2449)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2468)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2493)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1047)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:408)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1046)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:448)
	at org.apache.spark.sql.Dataset.$anonfun$collectToPython$1(Dataset.scala:4258)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4432)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4430)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:4430)
	at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:4255)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonWithNamedArgumentRunner.<init>(ArrowPythonRunner.scala:103)
	at org.apache.spark.sql.execution.python.ArrowEvalPythonEvaluatorFactory.evaluate(ArrowEvalPythonExec.scala:121)
	at org.apache.spark.sql.execution.python.EvalPythonEvaluatorFactory$EvalPythonPartitionEvaluator.eval(EvalPythonEvaluatorFactory.scala:112)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2(EvalPythonExec.scala:77)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2$adapted(EvalPythonExec.scala:76)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2(RDD.scala:878)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2$adapted(RDD.scala:878)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	... 1 more
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 2.0 (TID 4)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	... 17 more
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/tests/test_memory_profiler.py", line 95, in test_profile_pandas_udf
    f()
  File "/__w/spark/spark/python/pyspark/tests/test_memory_profiler.py", line 141, in exec_pandas_udf_ser_to_ser
    self.spark.range(10).select(ser_to_ser("id")).collect()
  File "/__w/spark/spark/python/pyspark/sql/dataframe.py", line 1374, in collect
    sock_info = self._jdf.collectToPython()
  File "/__w/spark/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/java_gateway.py", line 1322, in __call__
    return_value = get_return_value(
  File "/__w/spark/spark/python/pyspark/errors/exceptions/captured.py", line 213, in deco
    return f(*a, **kw)
  File "/__w/spark/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/protocol.py", line 326, in get_return_value
    raise Py4JJavaError(
py4j.protocol.Py4JJavaError: An error occurred while calling o235.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 3 in stage 0.0 failed 1 times, most recent failure: Lost task 3.0 in stage 0.0 (TID 3) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonWithNamedArgumentRunner.<init>(ArrowPythonRunner.scala:103)
	at org.apache.spark.sql.execution.python.ArrowEvalPythonEvaluatorFactory.evaluate(ArrowEvalPythonExec.scala:121)
	at org.apache.spark.sql.execution.python.EvalPythonEvaluatorFactory$EvalPythonPartitionEvaluator.eval(EvalPythonEvaluatorFactory.scala:112)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2(EvalPythonExec.scala:77)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2$adapted(EvalPythonExec.scala:76)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2(RDD.scala:878)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2$adapted(RDD.scala:878)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 2.0 (TID 4)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	... 17 more

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:992)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2428)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2449)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2468)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2493)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1047)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:408)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1046)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:448)
	at org.apache.spark.sql.Dataset.$anonfun$collectToPython$1(Dataset.scala:4258)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4432)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4430)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:4430)
	at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:4255)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonWithNamedArgumentRunner.<init>(ArrowPythonRunner.scala:103)
	at org.apache.spark.sql.execution.python.ArrowEvalPythonEvaluatorFactory.evaluate(ArrowEvalPythonExec.scala:121)
	at org.apache.spark.sql.execution.python.EvalPythonEvaluatorFactory$EvalPythonPartitionEvaluator.eval(EvalPythonEvaluatorFactory.scala:112)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2(EvalPythonExec.scala:77)
	at org.apache.spark.sql.execution.python.EvalPythonExec.$anonfun$doExecute$2$adapted(EvalPythonExec.scala:76)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2(RDD.scala:878)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsWithIndexInternal$2$adapted(RDD.scala:878)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	... 1 more
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "Executor task launch worker for task 0.0 in stage 2.0 (TID 4)"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.python.PythonArrowInput.$init$(PythonArrowInput.scala:67)
	at org.apache.spark.sql.execution.python.BaseArrowPythonRunner.<init>(ArrowPythonRunner.scala:40)
	at org.apache.spark.sql.execution.python.ArrowPythonRunner.<init>(ArrowPythonRunner.scala:81)
	at org.apache.spark.sql.execution.python.FlatMapGroupsInBatchExec.$anonfun$doExecute$1(FlatMapGroupsInBatchExec.scala:92)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	... 17 more
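
One more pattern worth decoding: only the first task to trigger initialization of org.apache.spark.sql.util.ArrowUtils$ sees the underlying java.lang.NoSuchFieldError: chunkSize. The JVM marks a class whose static initializer throws as erroneous, and every later use of it in the same JVM fails fast with java.lang.NoClassDefFoundError: Could not initialize class ...; that is why the failure above, reported for task 3.0 in stage 0.0, embeds an ExceptionInInitializerError naming a thread from an earlier run ("Executor task launch worker for task 0.0 in stage 2.0 (TID 4)"). The sketch below reproduces the caching with a hypothetical class; its RuntimeException stands in for the NoSuchFieldError:

public class InitCachingDemo {
  // Hypothetical class whose static initialization fails exactly once.
  static class FailsToInit {
    static final int VALUE = compute();
    private static int compute() {
      throw new RuntimeException("boom"); // stands in for NoSuchFieldError: chunkSize
    }
  }

  public static void main(String[] args) {
    for (int attempt = 1; attempt <= 2; attempt++) {
      try {
        System.out.println(FailsToInit.VALUE);
      } catch (Throwable t) {
        // attempt 1: java.lang.ExceptionInInitializerError (wrapping the RuntimeException)
        // attempt 2: java.lang.NoClassDefFoundError: Could not initialize class ...FailsToInit
        System.out.println("attempt " + attempt + ": " + t);
      }
    }
  }
}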

Check failure on line 1 in python/pyspark/ml/tests/connect/test_connect_classification.py

@github-actions / Report test results

python/pyspark/ml/tests/connect/test_connect_classification.py.test_binary_classes_logistic_regression

<_MultiThreadedRendezvous of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:30:09.458851892+00:00", grpc_status:2, grpc_message:"chunkSize"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 76, in test_binary_classes_logistic_regression
    model = lorv2.fit(df1)
  File "/__w/spark/spark/python/pyspark/ml/connect/base.py", line 105, in fit
    return self._fit(dataset)
  File "/__w/spark/spark/python/pyspark/ml/connect/classification.py", line 232, in _fit
    num_rows, num_features, classes = dataset.select(
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_MultiThreadedRendezvous of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:30:09.458851892+00:00", grpc_status:2, grpc_message:"chunkSize"}"
>

Check failure on line 1 in python/pyspark/ml/tests/connect/test_connect_classification.py

@github-actions / Report test results

python/pyspark/ml/tests/connect/test_connect_classification.py.test_multi_classes_logistic_regression

<_MultiThreadedRendezvous of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:30:10.210636731+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 120, in test_multi_classes_logistic_regression
    model = lorv2.fit(df1)
  File "/__w/spark/spark/python/pyspark/ml/connect/base.py", line 105, in fit
    return self._fit(dataset)
  File "/__w/spark/spark/python/pyspark/ml/connect/classification.py", line 232, in _fit
    num_rows, num_features, classes = dataset.select(
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 609, in head
    rs = self.head(1)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 611, in head
    return self.take(n)
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 616, in take
    return self.limit(num).collect()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 802, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1270, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_MultiThreadedRendezvous of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:30:10.210636731+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>

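Note how the gRPC details alternate between "chunkSize" and "Could not initialize class org.apache.spark.sql.util.ArrowUtils$" across these failures. That is standard JVM behavior rather than two distinct bugs: the first use of ArrowUtils$ in a given JVM fails inside its static initializer (ExceptionInInitializerError carrying the NoSuchFieldError), after which the class is marked erroneous and every later use fails with NoClassDefFoundError instead. A self-contained sketch of that init-once behavior:

```java
// Why only the first touch of a class reports the real cause.
class Boom {
    static {
        // Stand-in for the failing Netty field lookup.
        if (true) throw new NoSuchFieldError("chunkSize");
    }
    static void touch() {}
}

public class InitOnce {
    public static void main(String[] args) {
        try { Boom.touch(); } catch (Throwable t) { System.out.println(t); }
        // -> java.lang.ExceptionInInitializerError (cause: NoSuchFieldError: chunkSize)
        try { Boom.touch(); } catch (Throwable t) { System.out.println(t); }
        // -> java.lang.NoClassDefFoundError: Could not initialize class Boom
    }
}
```

Which message a given test reports therefore just depends on whether it happened to be the first one to touch Arrow in that JVM.
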
Check failure on line 1 in python/pyspark/ml/tests/connect/test_connect_classification.py

@github-actions / Report test results

python/pyspark/ml/tests/connect/test_connect_classification.py.test_save_load

Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "SparkConnectExecuteThread_opId=403bf1f4-6480-4fc3-afca-813e510ff614"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIterator.<init>(ArrowConverters.scala:258)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIteratorWithSchema.<init>(ArrowConverters.scala:325)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.fromBatchWithSchemaIterator(ArrowConverters.scala:358)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLocalRelation(SparkConnectPlanner.scala:1196)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:139)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformToDF(SparkConnectPlanner.scala:531)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:161)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRepartition(SparkConnectPlanner.scala:353)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:144)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLimit(SparkConnectPlanner.scala:1491)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:127)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.handlePlan(SparkConnectPlanExecution.scala:64)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handlePlan(ExecuteThreadRunner.scala:229)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:182)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:287)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:287)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:286)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:107)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:263)

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "SparkConnectExecuteThread_opId=403bf1f4-6480-4fc3-afca-813e510ff614"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIterator.<init>(ArrowConverters.scala:258)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIteratorWithSchema.<init>(ArrowConverters.scala:325)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.fromBatchWithSchemaIterator(ArrowConverters.scala:358)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLocalRelation(SparkConnectPlanner.scala:1196)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:139)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformToDF(SparkConnectPlanner.scala:531)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:161)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRepartition(SparkConnectPlanner.scala:353)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:144)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLimit(SparkConnectPlanner.scala:1491)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:127)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.handlePlan(SparkConnectPlanExecution.scala:64)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handlePlan(ExecuteThreadRunner.scala:229)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:182)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:287)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:287)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:286)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:107)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:263)
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py", line 154, in test_save_load
    loaded_estimator = LORV2.load(fs_path)
  File "/__w/spark/spark/python/pyspark/ml/connect/io_utils.py", line 264, in load
    for row in file_data_df.toLocalIterator():
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1981, in toLocalIterator
    for schema_or_table in self._session.client.to_table_as_iterator(
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 787, in to_table_as_iterator
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1248, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0) (localhost executor driver): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "SparkConnectExecuteThread_opId=403bf1f4-6480-4fc3-afca-813e510ff614"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.base/java.lang.Class.forName0(Native Method)
	at java.base/java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIterator.<init>(ArrowConverters.scala:258)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIteratorWithSchema.<init>(ArrowConverters.scala:325)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.fromBatchWithSchemaIterator(ArrowConverters.scala:358)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLocalRelation(SparkConnectPlanner.scala:1196)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:139)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformToDF(SparkConnectPlanner.scala:531)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:161)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRepartition(SparkConnectPlanner.scala:353)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:144)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLimit(SparkConnectPlanner.scala:1491)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:127)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.handlePlan(SparkConnectPlanExecution.scala:64)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handlePlan(ExecuteThreadRunner.scala:229)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:182)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:287)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:287)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:286)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:107)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:263)

Driver stacktrace:

JVM stacktrace:
org.apache.spark.SparkException
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2822)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2819)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2819)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1260)
	at scala.Option.foreach(Option.scala:437)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1260)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3089)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3023)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3012)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchIterator.<init>(ArrowConverters.scala:87)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$ArrowBatchWithSchemaIterator.<init>(ArrowConverters.scala:142)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.toBatchWithSchemaIterator(ArrowConverters.scala:226)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.$anonfun$rowToArrowConverter$1(SparkConnectPlanExecution.scala:87)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:891)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:891)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:628)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:631)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.lang.Thread.run(Thread.java:840)
Caused by: java.lang.ExceptionInInitializerError: Exception java.lang.NoSuchFieldError: chunkSize [in thread "SparkConnectExecuteThread_opId=403bf1f4-6480-4fc3-afca-813e510ff614"]
	at io.netty.buffer.PooledByteBufAllocatorL$InnerAllocator.<init>(PooledByteBufAllocatorL.java:153)
	at io.netty.buffer.PooledByteBufAllocatorL.<init>(PooledByteBufAllocatorL.java:49)
	at org.apache.arrow.memory.NettyAllocationManager.<clinit>(NettyAllocationManager.java:51)
	at org.apache.arrow.memory.DefaultAllocationManagerFactory.<clinit>(DefaultAllocationManagerFactory.java:26)
	at java.lang.Class.forName0(Class.java:-2)
	at java.lang.Class.forName(Class.java:375)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getFactory(DefaultAllocationManagerOption.java:108)
	at org.apache.arrow.memory.DefaultAllocationManagerOption.getDefaultAllocationManagerFactory(DefaultAllocationManagerOption.java:98)
	at org.apache.arrow.memory.BaseAllocator$Config.getAllocationManagerFactory(BaseAllocator.java:773)
	at org.apache.arrow.memory.ImmutableConfig.access$801(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$InitShim.getAllocationManagerFactory(ImmutableConfig.java:83)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:47)
	at org.apache.arrow.memory.ImmutableConfig.<init>(ImmutableConfig.java:24)
	at org.apache.arrow.memory.ImmutableConfig$Builder.build(ImmutableConfig.java:485)
	at org.apache.arrow.memory.BaseAllocator.<clinit>(BaseAllocator.java:62)
	at org.apache.spark.sql.util.ArrowUtils$.<clinit>(ArrowUtils.scala:36)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIterator.<init>(ArrowConverters.scala:258)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$InternalRowIteratorWithSchema.<init>(ArrowConverters.scala:325)
	at org.apache.spark.sql.execution.arrow.ArrowConverters$.fromBatchWithSchemaIterator(ArrowConverters.scala:358)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLocalRelation(SparkConnectPlanner.scala:1196)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:139)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformToDF(SparkConnectPlanner.scala:531)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:161)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRepartition(SparkConnectPlanner.scala:353)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:144)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformProject(SparkConnectPlanner.scala:1387)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:125)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformLimit(SparkConnectPlanner.scala:1491)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.transformRelation(SparkConnectPlanner.scala:127)
	at org.apache.spark.sql.connect.execution.SparkConnectPlanExecution.handlePlan(SparkConnectPlanExecution.scala:64)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handlePlan(ExecuteThreadRunner.scala:229)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:182)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:287)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:918)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:287)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.artifact.ArtifactManager.$anonfun$withResources$1(ArtifactManager.scala:80)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.artifact.ArtifactManager.withResources(ArtifactManager.scala:79)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:286)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:155)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:107)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:263)

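For isolating the problem locally there is, presumably, a way to sidestep the crash without swapping jars: Arrow's allocation-manager selection (the DefaultAllocationManagerOption.getFactory call visible in every trace) honors a system property, and choosing the Unsafe manager should keep PooledByteBufAllocatorL from ever being class-initialized. This is a diagnostic sketch only, assuming the arrow.allocation.manager.type property and its "Unsafe" value behave as documented; it is not the fix for the upgrade itself:

```java
// Steer Arrow off the Netty-backed allocation manager before anything in the
// JVM touches ArrowUtils$. Must run before the first Arrow class loads.
public class ArrowAllocatorWorkaround {
    public static void main(String[] args) {
        // "Netty" and "Unsafe" are the manager types Arrow's
        // DefaultAllocationManagerOption is expected to understand.
        System.setProperty("arrow.allocation.manager.type", "Unsafe");
        // ... start the Spark Connect server / Arrow conversion from here.
    }
}
```

In a Spark run the equivalent would be passing -Darrow.allocation.manager.type=Unsafe through the driver and executor JVM options.
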
Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py.test_all

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"chunkSize", grpc_status:2, created_time:"2023-12-17T09:33:26.903429017+00:00"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_any_all.py", line 41, in test_all
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"chunkSize", grpc_status:2, created_time:"2023-12-17T09:33:26.903429017+00:00"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py.test_any

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:33:26.945396213+00:00"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_any_all.py", line 113, in test_any
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:33:26.945396213+00:00"}"
>

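From this point on, the pandas-on-Spark parity suites all die inside ps.from_pandas: Spark Connect ships the local pandas data to the server as an Arrow batch embedded in the plan, so even a pure analysis RPC such as isLocal() drives the server through transformLocalRelation and ArrowConverters.fromBatchWithSchemaIterator into the broken allocator. A standalone probe that forces the same class-initialization path, assuming arrow-memory-netty is on the classpath:

```java
import org.apache.arrow.memory.RootAllocator;

// Constructing the first RootAllocator triggers BaseAllocator.<clinit> ->
// DefaultAllocationManagerOption.getFactory -> the Netty allocation manager,
// i.e. exactly the path that fails in the stack traces above.
public class ArrowInitProbe {
    public static void main(String[] args) {
        try (RootAllocator allocator = new RootAllocator()) {
            System.out.println("Arrow allocator OK, limit = " + allocator.getLimit());
        } catch (Throwable t) {
            // With Netty 4.1.104.Final on the classpath this is expected to be
            // ExceptionInInitializerError caused by NoSuchFieldError: chunkSize.
            t.printStackTrace();
        }
    }
}
```
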
Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_add

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.665520698+00:00", grpc_status:2, grpc_message:"chunkSize"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 44, in test_add
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.665520698+00:00", grpc_status:2, grpc_message:"chunkSize"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_floordiv

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:12:59.715276378+00:00"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 114, in test_floordiv
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:12:59.715276378+00:00"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_mod

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.754506219+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 130, in test_mod
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.754506219+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_mul

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.789859226+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 78, in test_mul
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.789859226+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_pow

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.828513522+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 152, in test_pow
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:12:59.828513522+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_sub

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:12:59.862773785+00:00"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 61, in test_sub
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:12:59.862773785+00:00"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py.test_truediv

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:12:59.894611432+00:00"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py", line 98, in test_truediv
    pdf, psdf = self.pdf, self.psdf
  File "/__w/spark/spark/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py", line 31, in psdf
    return ps.from_pandas(self.pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$", grpc_status:2, created_time:"2023-12-17T09:12:59.894611432+00:00"}"
>

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_backfill

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"chunkSize", grpc_status:2, created_time:"2023-12-17T09:31:23.526138338+00:00"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 41, in test_backfill
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "chunkSize"
	debug_error_string = "UNKNOWN:Error received from peer  {grpc_message:"chunkSize", grpc_status:2, created_time:"2023-12-17T09:31:23.526138338+00:00"}"
>
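
Here the details string is the bare "chunkSize": this worker's JVM hit the failing initializer for the first time, so the original NoSuchFieldError message leaked through unwrapped. NoSuchFieldError is a LinkageError, raised when bytecode compiled against one version of a class references a field that the loaded version no longer declares. A hedged diagnostic sketch of checking for such a field ahead of an upgrade (the probed class name is a guess for illustration, not taken from this log):

```java
// Hypothetical pre-upgrade probe (not Spark or Arrow code): check whether a
// class on the classpath still declares a field that downstream bytecode
// links against. A mismatch surfaces at runtime as NoSuchFieldError.
import java.lang.reflect.Field;

public final class LinkageProbe {
    static boolean declaresField(String className, String fieldName) {
        try {
            for (Field f : Class.forName(className).getDeclaredFields()) {
                if (f.getName().equals(fieldName)) {
                    return true;
                }
            }
        } catch (ClassNotFoundException e) {
            // Class absent entirely; treat the field as missing as well.
        }
        return false;
    }

    public static void main(String[] args) {
        // "chunkSize" mirrors the grpc_message above; the owning class is an
        // assumption for illustration and may differ in a real Netty build.
        System.out.println(
            declaresField("io.netty.buffer.PooledByteBufAllocator", "chunkSize"));
    }
}
```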

Check failure on line 1 in python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py

@github-actions / Report test results

python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py.test_bfill

<_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:31:23.558284032+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>
Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/pandas/tests/computation/test_missing_data.py", line 423, in test_bfill
    psdf = ps.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/namespace.py", line 154, in from_pandas
    return DataFrame(pobj)
  File "/__w/spark/spark/python/pyspark/pandas/frame.py", line 573, in __init__
    internal = InternalFrame.from_pandas(pdf)
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in from_pandas
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/internal.py", line 1530, in <listcomp>
    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
  File "/__w/spark/spark/python/pyspark/pandas/utils.py", line 611, in scol_for
    return sdf["`{}`".format(column_name)]
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/spark/spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1098, in _analyze
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1501, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1564, in _handle_rpc_error
    raise SparkConnectGrpcException(str(rpc_error)) from None
pyspark.errors.exceptions.connect.SparkConnectGrpcException: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNKNOWN
	details = "Could not initialize class org.apache.spark.sql.util.ArrowUtils$"
	debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-12-17T09:31:23.558284032+00:00", grpc_status:2, grpc_message:"Could not initialize class org.apache.spark.sql.util.ArrowUtils$"}"
>
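
In all of these failures the client raises SparkConnectGrpcException around an _InactiveRpcError whose status is StatusCode.UNKNOWN (grpc_status:2). UNKNOWN is the generic code a gRPC server falls back to when a thrown error has no specific status mapping, and only the exception message survives as grpc_message. A sketch of that fallback shape using the public io.grpc API (an illustration only, not Spark Connect's actual error translation):

```java
// Sketch of the generic server-side fallback that produces the UNKNOWN
// status (code 2) seen above; uses the public io.grpc API, not Spark
// Connect's actual error-translation path.
import io.grpc.Status;
import io.grpc.StatusRuntimeException;

public final class ToUnknownStatus {
    static StatusRuntimeException toStatus(Throwable t) {
        // The Throwable's message becomes grpc_message on the client side.
        return Status.UNKNOWN
                .withDescription(t.getMessage())
                .withCause(t)
                .asRuntimeException();
    }

    public static void main(String[] args) {
        Throwable boom = new NoClassDefFoundError(
                "Could not initialize class org.apache.spark.sql.util.ArrowUtils$");
        StatusRuntimeException e = toStatus(boom);
        // Prints: 2: Could not initialize class org.apache.spark.sql.util.ArrowUtils$
        System.out.println(e.getStatus().getCode().value() + ": "
                + e.getStatus().getDescription());
    }
}
```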