How do I set up the JDBC driver for MySQL in a PySpark Jupyter notebook?
I am trying to load a bunch of CSV files row by row into a MySQL instance running on OpenShift, using PySpark. I have a Jupyter notebook up and running.

Here is my code. It fails with a driver-specific error:
Py4JJavaError: An error occurred while calling o89.save.
from pyspark.sql import SparkSession

if __name__ == '__main__':
    scSpark = SparkSession \
        .builder \
        .appName("reading csv") \
        .getOrCreate()

data_file = '/opt/app-root/src/data/train.psv'
sdfData = scSpark.read.csv(data_file, header=True, sep="|").cache()
print('Total Records = {}'.format(sdfData.count()))
sdfData.show()

sdfData.registerTempTable("train")
output = scSpark.sql('SELECT count(*) from train')
output.show()
+--------+
|count(1)|
+--------+
| 1168686|
+--------+
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages mysql:mysql-connector-java:jar:8.0.21 pyspark-shell'

output = scSpark.sql('SELECT * from train')
output.show()

output.write.format('jdbc').options(
    url='jdbc:mysql://mysql-1-28d85/sepsis',
    driver='com.mysql.jdbc.Driver',  # also tried 'mysql-connector-java.Driver' and 'org.mysql.jdbc.Driver'
    dbtable='train',
    user='sepsis',
    password='Success_2020').mode('append').save()
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-57-114af97e0442> in <module>
     11     dbtable='train',
     12     user='sepsis',
---> 13     password='Success_2020').mode('append').save()
/opt/app-root/lib/python3.6/site-packages/pyspark/sql/readwriter.py in save(self,path,format,mode,partitionBy,**options)
735 self.format(format)
736 if path is None:
--> 737 self._jwrite.save()
738 else:
739 self._jwrite.save(path)
/opt/app-root/lib/python3.6/site-packages/py4j/java_gateway.py in __call__(self,*args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer,self.gateway_client,self.target_id,self.name)
1258
1259 for temp_arg in temp_args:
/opt/app-root/lib/python3.6/site-packages/pyspark/sql/utils.py in deco(*a,**kw)
61 def deco(*a,**kw):
62 try:
---> 63 return f(*a,**kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/opt/app-root/lib/python3.6/site-packages/py4j/protocol.py in get_return_value(answer,gateway_client,target_id,name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id,".",name),value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o1641.save.
: java.lang.ClassNotFoundException: com.mysql.jdbc.Driver
at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
at java.lang.ClassLoader.loadClass(ClassLoader.java:419)
at java.lang.ClassLoader.loadClass(ClassLoader.java:352)
at org.apache.spark.sql.execution.datasources.jdbc.DriverRegistry$.register(DriverRegistry.scala:45)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$5.apply(JDBCOptions.scala:99)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$5.apply(JDBCOptions.scala:99)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.<init>(JDBCOptions.scala:99)
at org.apache.spark.sql.execution.datasources.jdbc.JdbcOptionsInWrite.<init>(JDBCOptions.scala:190)
at org.apache.spark.sql.execution.datasources.jdbc.JdbcOptionsInWrite.<init>(JDBCOptions.scala:194)
at org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider.createRelation(JdbcRelationProvider.scala:45)
at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:45)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:83)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:81)
at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:676)
at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:676)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:80)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:75)
at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:676)
at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:285)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:271)
at sun.reflect.GeneratedMethodAccessor67.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
I changed the code to use --packages.

This is also on OpenShift, where all the components run as pods and have no access to the outside network.
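Since the pods cannot reach the outside network, --packages (which downloads the connector from Maven Central at launch) cannot work here; the jar has to be brought into the pod and referenced locally. A minimal sketch of that, assuming a hypothetical jar path inside the pod:

from pyspark.sql import SparkSession

# Hypothetical path; the connector jar must already be inside the pod.
jar = '/opt/app-root/src/jars/mysql-connector-java-8.0.21.jar'

# Only effective if no JVM/SparkSession has been started yet in this kernel.
scSpark = SparkSession \
    .builder \
    .appName("reading csv") \
    .config('spark.jars', jar) \
    .config('spark.driver.extraClassPath', jar) \
    .getOrCreate()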
Answer
java.lang.ClassNotFoundException: com.mysql.cj.jdbc.Driver

This says it all. You have to start pyspark (or the environment) with the JDBC driver for MySQL using --driver-class-path or similar (that will be specific to Jupyter).
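From a notebook, one way to pass --driver-class-path is through the PYSPARK_SUBMIT_ARGS environment variable before the first SparkSession is created. A minimal sketch, assuming the connector jar has already been downloaded to a hypothetical local path (this variant uses a local jar, so it also works without network access):

import os

# Hypothetical location of the downloaded MySQL Connector/J jar.
jar = '/opt/app-root/src/jars/mysql-connector-java-8.0.21.jar'

# Must run before the first SparkSession is created in this kernel;
# restart the kernel first if one already exists.
os.environ['PYSPARK_SUBMIT_ARGS'] = (
    '--driver-class-path {0} --jars {0} pyspark-shell'.format(jar)
)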
For Jupyter Notebook

If you use Jupyter Notebook, you should set the PYSPARK_SUBMIT_ARGS environment variable as follows:

import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.postgresql:postgresql:42.1.1 pyspark-shell'
Change --packages to reference the MySQL JDBC driver instead.
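For example, with the Maven coordinates of MySQL Connector/J (version 8.0.21 below is an assumption, matching the question). Spark documents the --packages format as groupId:artifactId:version, so the extra :jar: qualifier in the question's coordinates is likely a second problem; and the variable must be set before the SparkSession is created, so setting it mid-notebook, as the question does, has no effect on the already-running JVM:

import os

# Must be set before any SparkSession exists in this kernel.
os.environ['PYSPARK_SUBMIT_ARGS'] = \
    '--packages mysql:mysql-connector-java:8.0.21 pyspark-shell'

from pyspark.sql import SparkSession

scSpark = SparkSession.builder.appName("reading csv").getOrCreate()

Keep in mind that --packages downloads from Maven Central at startup, so on a cluster with no outside network access the local-jar approaches shown elsewhere in this answer are needed instead.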
Once you go to Spark's installation path, there is a jars folder. Download the MySQL JDBC jar file and place it into that jars folder; then you do not need the option on the command line or in the code at all.
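A minimal sketch of that step, assuming SPARK_HOME is set and the connector jar has already been downloaded to a hypothetical path:

import os
import shutil

# Hypothetical download location of the connector jar.
jar = '/opt/app-root/src/mysql-connector-java-8.0.21.jar'

# Copy it into Spark's jars folder so every session picks it up automatically.
shutil.copy(jar, os.path.join(os.environ['SPARK_HOME'], 'jars'))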