#!/usr/bin/env bash
# Submit a Spark application to YARN in cluster mode.
# $1: path to the application to submit (also used as the YARN application name).

export HADOOP_CONF_DIR=$HOME/hadoop/conf
export HADOOP_HOME=$HOME/hadoop
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_JAVA_OPTS="-Dhdp.version=2.6.3.0-235"

$HOME/spark/bin/spark-submit \
  --master yarn \
  --deploy-mode cluster \
  --queue default \
  --num-executors 10 \
  --conf "spark.driver.memory=1g" \
  --conf "spark.driver.memoryOverhead=600m" \
  --conf "spark.executor.memory=600m" \
  --conf "spark.executor.memoryOverhead=300m" \
  --name "$1" \
  --conf "spark.sql.autoBroadcastJoinThreshold=-1" \
  --conf "spark.driver.extraJavaOptions=-Dhdp.version=2.6.3.0-235" \
  --conf "spark.yarn.am.extraJavaOptions=-Dhdp.version=2.6.3.0-235" \
  --conf "spark.sql.shuffle.partitions=100" \
  "$1"
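
# Example invocation (hypothetical script and application names; adjust to
# your environment). Assuming the script above is saved as submit.sh, the
# single argument is both the file handed to spark-submit and the name shown
# in the YARN ResourceManager UI:
#
#   ./submit.sh my_job.py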