Description
I have installed Scala, Spark, and Python 3 on Ubuntu. I am running the code below in a Jupyter notebook. Could you please help me resolve this issue?
from pyspark import SparkConf, SparkContext
import sys
inputs = sys.argv[1]
output = sys.argv[2]
def words_once(line):
    words = line.split()
    words[3] = int(words[3])  # the count field must be numeric
    yield tuple(words)

def word_filter(wiki_page):
    # keep English pages that are not the main page or a "Special:" page
    if wiki_page[1] == "en" and wiki_page[2] != "Main Page" and not wiki_page[2].startswith("Special:"):
        return wiki_page

def max_val(x, y):
    return max(x, y)

def get_key(kv):
    return kv[0]

def map_key_value(wiki_page):
    return (wiki_page[0], wiki_page[3])

def output_format(kv):
    k, v = kv
    return '%s %i' % (k, v)

# sc is the SparkContext provided by the notebook session
text = sc.textFile(inputs)
words = text.flatMap(words_once)
filtered = words.filter(word_filter)  # renamed so the RDD does not shadow the word_filter function
key_pair = filtered.map(map_key_value)
wordcount = key_pair.reduceByKey(max_val)
outdata = wordcount.sortBy(get_key).map(output_format)
outdata.saveAsTextFile(output)
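Note: the code above assumes sc already exists, which my notebook kernel provides. If it were run as a standalone script, I believe the context would be created roughly like this (a minimal sketch; the app name is just a placeholder):

conf = SparkConf().setAppName('wikipedia popular')  # placeholder app name
sc = SparkContext.getOrCreate(conf=conf)  # reuses an existing context if one is already running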
But I am getting an error at the line outdata = wordcount.sortBy(get_key).map(output_format).
The error is:
--------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-25-58ad9a24ae79> in <module>
----> 1 outdata = wordcount.sortBy(get_key).map(output_format)
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py in sortBy(self, keyfunc, ascending, numPartitions)
697 [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
698 """
--> 699 return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
700
701 def glom(self):
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py in sortByKey(self, ascending, numPartitions, keyfunc)
665 # the key-space into bins such that the bins have roughly the same
666 # number of (key, value) pairs falling into them
--> 667 rddSize = self.count()
668 if not rddSize:
669 return self # empty RDD
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py in count(self)
1053 3
1054 """
-> 1055 return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
1056
1057 def stats(self):
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py in sum(self)
1044 6.0
1045 """
-> 1046 return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
1047
1048 def count(self):
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py in fold(self, zeroValue, op)
915 # zeroValue provided to each partition is unique from the one provided
916 # to the final reduce call
--> 917 vals = self.mapPartitions(func).collect()
918 return reduce(op, vals, zeroValue)
919
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/rdd.py in collect(self)
814 """
815 with SCCallSiteSync(self.context) as css:
--> 816 sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
817 return list(_load_from_socket(sock_info, self._jrdd_deserializer))
818
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
~/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}
{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 4.0 failed 1 times, most recent failure: Lost task 1.0 in stage 4.0 (TID 9, localhost, executor driver): org.apache.spark.SparkException:
Bad data in pyspark.daemon's standard output. Invalid port number:
459092027 (0x1b5d303b)
Python command to execute the daemon was:
ipython -m pyspark.daemon
Check that you don't have any unexpected modules or libraries in
your PYTHONPATH:
/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/pyspark.zip:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/jars/spark-core_2.11-2.4.4.jar:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/:
Also, check if you have a sitecustomize.py module in your python path,
or in your python installation, that is printing to standard output
at org.apache.spark.api.python.PythonWorkerFactory.startDaemon(PythonWorkerFactory.scala:221)
at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:122)
at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:95)
at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:117)
at org.apache.spark.api.python.BasePythonRunner.compute(PythonRunner.scala:109)
at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:65)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.api.python.PairwiseRDD.compute(PythonRDD.scala:103)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:123)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1889)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2110)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2059)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2126)
at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:945)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
at org.apache.spark.rdd.RDD.collect(RDD.scala:944)
at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:166)
at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException:
Bad data in pyspark.daemon's standard output. Invalid port number:
459092027 (0x1b5d303b)
Python command to execute the daemon was:
ipython -m pyspark.daemon
Check that you don't have any unexpected modules or libraries in
your PYTHONPATH:
/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/pyspark.zip:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/jars/spark-core_2.11-2.4.4.jar:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip:/home/asha/Downloads/spark-2.4.4-bin-hadoop2.7/python/:
Also, check if you have a sitecustomize.py module in your python path,
or in your python installation, that is printing to standard output
at org.apache.spark.api.python.PythonWorkerFactory.startDaemon(PythonWorkerFactory.scala:221)
at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:122)
at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:95)
at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:117)
at org.apache.spark.api.python.BasePythonRunner.compute(PythonRunner.scala:109)
at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:65)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.api.python.PairwiseRDD.compute(PythonRDD.scala:103)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:123)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
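From the "Python command to execute the daemon was: ipython -m pyspark.daemon" line above, I suspect the worker interpreter is set to ipython, whose startup banner output would corrupt the port number the daemon writes to stdout. Would pointing PySpark at a plain interpreter before creating the context, roughly like below, be the right fix? (Just a sketch of what I plan to try; PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON are the standard PySpark environment variables, but I have not confirmed this resolves the error.)

import os
# Use a plain interpreter for both driver and workers so that nothing
# extra (like the IPython banner) is printed on the daemon's stdout.
os.environ['PYSPARK_PYTHON'] = 'python3'
os.environ['PYSPARK_DRIVER_PYTHON'] = 'python3'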