org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse

Running the SparkPi example program:

[root@node01 bin]# ./spark-submit --master spark://node01:7077 --class org.apache.spark.examples.SparkPi ../examples/jars/spark-examples_2.11-2.0.0.jar 100

The error below may be caused by assigning too many tasks, which exhausts the available memory.
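For context on the task count: the trailing argument of the command above (100) is the number of slices SparkPi parallelizes over, and each slice is scheduled as one task. The following is a rough Scala paraphrase of the bundled SparkPi example (written from memory, not copied from the Spark sources) showing where that argument ends up:

import org.apache.spark.sql.SparkSession
import scala.math.random

object SparkPiSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.appName("Spark Pi").getOrCreate()
    // args(0) is the trailing command-line argument ("100" above); it sets the slice count.
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid int overflow
    // parallelize(..., slices) creates `slices` partitions, i.e. `slices` tasks.
    val count = spark.sparkContext.parallelize(1 until n, slices).map { _ =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x * x + y * y <= 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / (n - 1))
    spark.stop()
  }
}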

Workaround: reduce the number of tasks (an adjusted spark-submit command is sketched below).
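A minimal sketch of the adjusted submission, assuming the same cluster and jar path as above: the trailing slice count is lowered from 100 to 10, and executor memory/cores are capped explicitly (the 512m and 2-core values are illustrative placeholders to tune for your nodes):

[root@node01 bin]# ./spark-submit --master spark://node01:7077 --class org.apache.spark.examples.SparkPi --executor-memory 512m --total-executor-cores 2 ../examples/jars/spark-examples_2.11-2.0.0.jar 10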

19/04/17 04:19:17 WARN NettyRpcEndpointRef: Error sending message [message = RemoveExecutor(1,Command exited with code 0)] in 1 attempts
org.apache.spark.SparkException: Exception thrown in awaitResult
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:77)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:75)
    at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at scala.PartialFunction$OrElse.apply(PartialFunction.scala:167)
    at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:83)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:102)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:78)
    at org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.removeExecutor(CoarseGrainedSchedulerBackend.scala:412)
    at org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend.executorRemoved(StandaloneSchedulerBackend.scala:157)
    at org.apache.spark.deploy.client.StandaloneAppClient$ClientEndpoint$$anonfun$receive$1.applyOrElse(StandaloneAppClient.scala:185)
    at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:117)
    at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
    at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
    at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:211)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not find CoarseGrainedScheduler.
    at org.apache.spark.rpc.netty.Dispatcher.postMessage(Dispatcher.scala:152)
    at org.apache.spark.rpc.netty.Dispatcher.postLocalMessage(Dispatcher.scala:127)
    at org.apache.spark.rpc.netty.NettyRpcEnv.ask(NettyRpcEnv.scala:225)
    at org.apache.spark.rpc.netty.NettyRpcEndpointRef.ask(NettyRpcEnv.scala:508)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:101)
    ... 11 more

19/04/17 04:19:21 WARN NettyRpcEndpointRef: Error sending message [message = RemoveExecutor(1,Command exited with code 0)] in 2 attempts
org.apache.spark.SparkException: Exception thrown in awaitResult
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:77)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:75)
    at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at scala.PartialFunction$OrElse.apply(PartialFunction.scala:167)
    at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:83)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:102)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:78)
    at org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.removeExecutor(CoarseGrainedSchedulerBackend.scala:412)
    at org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend.executorRemoved(StandaloneSchedulerBackend.scala:157)
    at org.apache.spark.deploy.client.StandaloneAppClient$ClientEndpoint$$anonfun$receive$1.applyOrElse(StandaloneAppClient.scala:185)
    at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:117)
    at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
    at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
    at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:211)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not find CoarseGrainedScheduler.
    at org.apache.spark.rpc.netty.Dispatcher.postMessage(Dispatcher.scala:152)
    at org.apache.spark.rpc.netty.Dispatcher.postLocalMessage(Dispatcher.scala:127)
    at org.apache.spark.rpc.netty.NettyRpcEnv.ask(NettyRpcEnv.scala:225)
    at org.apache.spark.rpc.netty.NettyRpcEndpointRef.ask(NettyRpcEnv.scala:508)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:101)
    ... 11 more

19/04/17 04:19:24 WARN NettyRpcEndpointRef: Error sending message [message = RemoveExecutor(1,Command exited with code 0)] in 3 attempts
org.apache.spark.SparkException: Exception thrown in awaitResult
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:77)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:75)
    at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at scala.PartialFunction$OrElse.apply(PartialFunction.scala:167)
    at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:83)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:102)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:78)
    at org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.removeExecutor(CoarseGrainedSchedulerBackend.scala:412)
    at org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend.executorRemoved(StandaloneSchedulerBackend.scala:157)
    at org.apache.spark.deploy.client.StandaloneAppClient$ClientEndpoint$$anonfun$receive$1.applyOrElse(StandaloneAppClient.scala:185)
    at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:117)
    at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
    at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
    at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:211)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not find CoarseGrainedScheduler.
    at org.apache.spark.rpc.netty.Dispatcher.postMessage(Dispatcher.scala:152)
    at org.apache.spark.rpc.netty.Dispatcher.postLocalMessage(Dispatcher.scala:127)
    at org.apache.spark.rpc.netty.NettyRpcEnv.ask(NettyRpcEnv.scala:225)
    at org.apache.spark.rpc.netty.NettyRpcEndpointRef.ask(NettyRpcEnv.scala:508)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:101)
    ... 11 more

19/04/17 04:19:24 ERROR Inbox: Ignoring error
org.apache.spark.SparkException: Error notifying standalone scheduler's driver endpoint
    at org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.removeExecutor(CoarseGrainedSchedulerBackend.scala:415)
    at org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend.executorRemoved(StandaloneSchedulerBackend.scala:157)
    at org.apache.spark.deploy.client.StandaloneAppClient$ClientEndpoint$$anonfun$receive$1.applyOrElse(StandaloneAppClient.scala:185)
    at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:117)
    at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
    at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
    at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:211)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Error sending message [message = RemoveExecutor(1,Command exited with code 0)]
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:119)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:78)
    at org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.removeExecutor(CoarseGrainedSchedulerBackend.scala:412)
    ... 9 more
Caused by: org.apache.spark.SparkException: Exception thrown in awaitResult
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:77)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:75)
    at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at scala.PartialFunction$OrElse.apply(PartialFunction.scala:167)
    at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:83)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:102)
    ... 11 more
Caused by: org.apache.spark.SparkException: Could not find CoarseGrainedScheduler.
    at org.apache.spark.rpc.netty.Dispatcher.postMessage(Dispatcher.scala:152)
    at org.apache.spark.rpc.netty.Dispatcher.postLocalMessage(Dispatcher.scala:127)
    at org.apache.spark.rpc.netty.NettyRpcEnv.ask(NettyRpcEnv.scala:225)
    at org.apache.spark.rpc.netty.NettyRpcEndpointRef.ask(NettyRpcEnv.scala:508)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:101)
    ... 11 more

19/04/17 04:19:24 INFO StandaloneAppClient$ClientEndpoint: Executor updated: app-20190417041843-0000/0 is now EXITED (Command exited with code 0)
19/04/17 04:19:24 INFO StandaloneSchedulerBackend: Executor app-20190417041843-0000/0 removed: Command exited with code 0
19/04/17 04:19:24 WARN NettyRpcEndpointRef: Error sending message [message = RemoveExecutor(0,Command exited with code 0)] in 1 attempts
org.apache.spark.SparkException: Exception thrown in awaitResult
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:77)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$1.applyOrElse(RpcTimeout.scala:75)
    at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:59)
    at scala.PartialFunction$OrElse.apply(PartialFunction.scala:167)
    at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:83)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:102)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:78)
    at org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.removeExecutor(CoarseGrainedSchedulerBackend.scala:412)
    at org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend.executorRemoved(StandaloneSchedulerBackend.scala:157)
    at org.apache.spark.deploy.client.StandaloneAppClient$ClientEndpoint$$anonfun$receive$1.applyOrElse(StandaloneAppClient.scala:185)
    at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:117)
    at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
    at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
    at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:211)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not find CoarseGrainedScheduler.
    at org.apache.spark.rpc.netty.Dispatcher.postMessage(Dispatcher.scala:152)
    at org.apache.spark.rpc.netty.Dispatcher.postLocalMessage(Dispatcher.scala:127)
    at org.apache.spark.rpc.netty.NettyRpcEnv.ask(NettyRpcEnv.scala:225)
    at org.apache.spark.rpc.netty.NettyRpcEndpointRef.ask(NettyRpcEnv.scala:508)
    at org.apache.spark.rpc.RpcEndpointRef.askWithRetry(RpcEndpointRef.scala:101)
    ... 11 more

19/04/17 04:19:24 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
19/04/17 04:19:24 INFO MemoryStore: MemoryStore cleared
19/04/17 04:19:24 INFO BlockManager: BlockManager stopped
19/04/17 04:19:24 INFO BlockManagerMaster: BlockManagerMaster stopped
19/04/17 04:19:24 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
19/04/17 04:19:27 WARN NettyRpcEnv: Ignored failure: java.util.concurrent.RejectedExecutionException: Task java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask@c591872 rejected from java.util.concurrent.ScheduledThreadPoolExecutor@43603368[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0]